diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -255,6 +255,36 @@
   defm "" : RVVUnsignedBinBuiltinSet;
 }
 
+multiclass RVVSignedShiftBuiltinSet {
+  defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+                                [["vv", "v", "vvUv"],
+                                 ["vx", "v", "vvz"]]>;
+}
+
+multiclass RVVUnsignedShiftBuiltinSet {
+  defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+                                [["vv", "Uv", "UvUvUv"],
+                                 ["vx", "Uv", "UvUvz"]]>;
+}
+
+multiclass RVVShiftBuiltinSet {
+  defm "" : RVVSignedShiftBuiltinSet;
+  defm "" : RVVUnsignedShiftBuiltinSet;
+}
+
+let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+  multiclass RVVSignedNShiftBuiltinSet {
+    defm "" : RVVOutOp0Op1BuiltinSet<NAME, "csi",
+                                     [["wv", "v", "vwUv"],
+                                      ["wx", "v", "vwz"]]>;
+  }
+  multiclass RVVUnsignedNShiftBuiltinSet {
+    defm "" : RVVOutOp0Op1BuiltinSet<NAME, "csi",
+                                     [["wv", "Uv", "UvUwUv"],
+                                      ["wx", "Uv", "UvUwz"]]>;
+  }
+}
+
 multiclass RVVIntTerBuiltinSet {
   defm "" : RVVOutOp1BuiltinSet;
 
-defm vsrl : RVVOutOp1BuiltinSet<"vsrl", "csil",
-                                [["vv", "Uv", "UvUvUv"],
-                                 ["vx", "Uv", "UvUvz"]]>;
-defm vsra : RVVOutOp1BuiltinSet<"vsra", "csil",
-                                [["vv", "v", "vvUv"],
-                                 ["vx", "v", "vvz"]]>;
+defm vsll : RVVShiftBuiltinSet;
+defm vsrl : RVVUnsignedShiftBuiltinSet;
+defm vsra : RVVSignedShiftBuiltinSet;
 
 // 12.7. Vector Narrowing Integer Right Shift Instructions
-let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
-defm vnsrl : RVVOutOp0Op1BuiltinSet<"vnsrl", "csi",
-                                    [["wv", "Uv", "UvUwUv"],
-                                     ["wx", "Uv", "UvUwz"]]>;
-defm vnsra : RVVOutOp0Op1BuiltinSet<"vnsra", "csi",
-                                    [["wv", "v", "vwUv"],
-                                     ["wx", "v", "vwz"]]>;
-}
+defm vnsrl : RVVUnsignedNShiftBuiltinSet;
+defm vnsra : RVVSignedNShiftBuiltinSet;
 
 // 12.8. Vector Integer Comparison Instructions
 defm vmseq : RVVIntMaskOutBuiltinSet;
@@ -615,6 +631,30 @@
 // 12.16. Vector Integer Move Instructions
 // TODO
 
+// 13. Vector Fixed-Point Arithmetic Instructions
+// 13.1. Vector Single-Width Saturating Add and Subtract
+defm vsaddu : RVVUnsignedBinBuiltinSet;
+defm vsadd : RVVSignedBinBuiltinSet;
+defm vssubu : RVVUnsignedBinBuiltinSet;
+defm vssub : RVVSignedBinBuiltinSet;
+
+// 13.2. Vector Single-Width Averaging Add and Subtract
+defm vaaddu : RVVUnsignedBinBuiltinSet;
+defm vaadd : RVVSignedBinBuiltinSet;
+defm vasubu : RVVUnsignedBinBuiltinSet;
+defm vasub : RVVSignedBinBuiltinSet;
+
+// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+defm vsmul : RVVSignedBinBuiltinSet;
+
+// 13.4. Vector Single-Width Scaling Shift Instructions
+defm vssrl : RVVUnsignedShiftBuiltinSet;
+defm vssra : RVVSignedShiftBuiltinSet;
+
+// 13.5. Vector Narrowing Fixed-Point Clip Instructions
+defm vnclipu : RVVUnsignedNShiftBuiltinSet;
+defm vnclip : RVVSignedNShiftBuiltinSet;
+
 // 14. Vector Floating-Point Instructions
 // 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
 defm vfadd : RVVFloatingBinBuiltinSet;
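Taken together, the new defm lines give each fixed-point instruction signed/unsigned vector-vector and vector-scalar overloads. As a rough illustration of the C-level API they generate — a minimal sketch, not part of the patch; the function names are invented and the vnclip shift amount is arbitrary:

#include <riscv_vector.h>

// 13.1/13.2: saturating and averaging signed adds share the
// (op1, op2, vl) overload shape used throughout the tests below.
vint16m2_t fixed_point_demo(vint16m2_t a, vint16m2_t b, size_t vl) {
  vint16m2_t sat = vsadd(a, b, vl); // saturating add
  return vaadd(sat, b, vl);         // averaging add
}

// 13.5: the "wx" form of vnclip narrows a double-width source to the
// single-width result using a scalar shift amount, here the constant 4.
vint8m1_t narrow_demo(vint16m2_t w, size_t vl) {
  return vnclip(w, 4, vl);
}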
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -0,0 +1,2587 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]],
i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m4( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vaadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vaaddu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t 
test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], 
i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vaaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, int8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + 
vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int16_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int32_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int64_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int64_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int64_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + 
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int64_t op2, size_t vl) { + return vaadd(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t 
test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, uint8_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, uint16_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, uint16_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) 
{ + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint16m8_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, uint16_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, uint32_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint32m8_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint32_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint64m1_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint64m2_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint64m4_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint64m8_t op2, + size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, uint64_t op2, size_t vl) { + return vaaddu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c @@ -0,0 +1,2587 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t 
vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vasub(op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vasub_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vasub(op1, 
op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t 
vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vx_i32m2(vint32m2_t 
op1, int32_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t 
test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
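A usage note before the remaining vasub and vasubu variants: every overloaded intrinsic in these tests deduces its vector type from the operands and takes the active vector length vl as the trailing argument, and vasub itself is an averaging subtract, roughly (op1 - op2) >> 1 with the rounding behavior taken from the vxrm CSR. A minimal strip-mined sketch of how such a call is used outside a test, assuming the v0.10-era intrinsic names exercised by this patch (vsetvl_e32m1, vle32_v_i32m1, vse32_v_i32m1) and a hypothetical helper halved_diff that is not part of the patch:

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

/* Hypothetical example, not part of this patch:
 * dst[i] = (a[i] - b[i]) >> 1, rounded per the current vxrm mode. */
void halved_diff(int32_t *dst, const int32_t *a, const int32_t *b, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e32m1(n);           /* elements handled in this strip */
    vint32m1_t va = vle32_v_i32m1(a, vl);  /* load one strip of each input */
    vint32m1_t vb = vle32_v_i32m1(b, vl);
    vint32m1_t vd = vasub(va, vb, vl);     /* overloaded form, as in the tests */
    vse32_v_i32m1(dst, vd, vl);            /* store the strip */
    a += vl;
    b += vl;
    dst += vl;
    n -= vl;
  }
}

The masked tests further down follow the same shape with two extra leading arguments, a vboolN_t mask and a maskedoff vector supplying the inactive elements, e.g. vasub(mask, maskedoff, op1, op2, vl).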
+vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vasub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vasubu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], 
i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vasubu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vasubu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vasubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vasub_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, int8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vasub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + 
vint16m2_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int16_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int32_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int64_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int64_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int64_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int64_t op2, size_t vl) { + return vasub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); 
+} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, uint8_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, uint16_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vasubu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, uint16_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint16m8_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, uint16_t op2, size_t 
vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, uint32_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint32m8_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vasubu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint32_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint64m1_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint64m2_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint64m4_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint64m8_t op2, + size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, uint64_t op2, size_t vl) { + return vasubu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c @@ -0,0 +1,1793 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck 
--check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: 
@test_vnclip_wx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, + size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, + size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], 
[[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, + size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// 
CHECK-RV32-LABEL: @test_vnclip_wv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { + return vnclip(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { + return 
vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, + size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { + return vnclipu(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint16mf4_t op1, vuint8mf8_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint16mf4_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint16mf2_t op1, vuint8mf4_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint16mf2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint16m1_t op1, vuint8mf2_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint16m1_t op1, size_t shift, size_t vl) { + return 
vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint16m2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint16m4_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint16m8_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint32mf2_t op1, vuint16mf4_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint32mf2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint32m1_t op1, vuint16mf2_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint32m1_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint32m2_t op1, vuint16m1_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint32m2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint32m4_t op1, vuint16m2_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint32m4_t op1, size_t shift, 
size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint32m8_t op1, vuint16m4_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint32m8_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint64m1_t op1, vuint32mf2_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint64m1_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint64m2_t op1, vuint32m1_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint64m2_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint64m4_t op1, vuint32m2_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint64m4_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint64m8_t op1, vuint32m4_t shift, + size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint64m8_t op1, size_t shift, size_t vl) { + return vnclip(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint16mf4_t op1, vuint8mf8_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint16mf4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint16mf2_t op1, vuint8mf4_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint16mf2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint16m1_t op1, vuint8mf2_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint16m1_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint16m2_t op1, vuint8m1_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint16m2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint16m4_t op1, vuint8m2_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint16m4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint16m8_t op1, vuint8m4_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint16m8_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint32mf2_t op1, vuint16mf4_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); 
+} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint32mf2_t op1, size_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint32m1_t op1, vuint16mf2_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint32m1_t op1, size_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint32m2_t op1, vuint16m1_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint32m2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint32m4_t op1, vuint16m2_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint32m4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint32m8_t op1, vuint16m4_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint32m8_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint64m1_t op1, vuint32mf2_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint64m1_t op1, size_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint64m2_t op1, vuint32m1_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint64m2_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint64m4_t op1, vuint32m2_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint64m4_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint64m8_t op1, vuint32m4_t shift, + size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint64m8_t op1, size_t shift, size_t vl) { + return vnclipu(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c @@ -0,0 +1,2587 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vsadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsadd_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vsadd(op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vsadd(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { 
+ return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t 
op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vsaddu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t 
+vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, uint16_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, uint16_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint16m8_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, uint32_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint32m8_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint64m1_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint64m2_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint64m4_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint64m8_t op2, + size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsaddu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c @@ -0,0 +1,1287 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t
op1, int8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + 
return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return 
vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t 
op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t 
test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vsmul(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsmul_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, int8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, 
vint16m2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int16_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int32_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int64_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int64_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int64_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int64_t op2, size_t vl) { + return vsmul(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c @@ -0,0 +1,1290 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature
+experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[OP1:%.*]], i64
[[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, + size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, + size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, + size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m8( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssra(op1, 
shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { + return vssra(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + 
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vuint16mf4_t shift, + size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t 
test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vuint16mf2_t shift, + size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vuint32mf2_t shift, + size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m2_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t 
op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, size_t shift, size_t vl) { + return vssra(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c @@ -0,0 +1,1305 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32( [[OP1:%.*]],
[[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m8( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, + size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, + size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t 
vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, + size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vssrl_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { + return vssrl(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t shift, + size_t vl) { + return vssrl(mask, maskedoff, 
op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint16m1_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint16m2_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint16m4_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint16m8_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint32m1_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint32m2_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint32m4_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint32m8_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, 
shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint64m1_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint64m2_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint64m4_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint64m8_t shift, + size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, size_t shift, size_t vl) { + return vssrl(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c @@ -0,0 +1,2587 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +//
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vssub_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], 
i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vssub(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m4( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + 
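For reference, a minimal usage sketch of the overloaded API these tests exercise: the overloaded form dispatches on argument types, so application code calls vssubu directly and the compiler resolves it to the type-suffixed builtin (here vssubu_vv_u16m2). This assumes the pre-1.0 RVV intrinsic names used throughout this patch (vsetvl_e16m2, vle16_v_u16m2, vse16_v_u16m2); the helper saturating_sub_u16 and its parameters are hypothetical and not part of the patch.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Unsigned saturating subtract over two buffers, strip-mined with vsetvl.
void saturating_sub_u16(uint16_t *dst, const uint16_t *a, const uint16_t *b,
                        size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e16m2(n);            // elements handled this iteration
    vuint16m2_t va = vle16_v_u16m2(a, vl);  // load vl elements of each operand
    vuint16m2_t vb = vle16_v_u16m2(b, vl);
    vuint16m2_t vr = vssubu(va, vb, vl);    // saturates at 0 instead of wrapping
    vse16_v_u16m2(dst, vr, vl);             // store vl results
    dst += vl; a += vl; b += vl; n -= vl;
  }
}
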
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t 
test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vssubu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vssubu(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vint8m4_t 
op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, int8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int16_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + 
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int32_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int64_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, 
vint64m2_t maskedoff, + vint64m2_t op1, int64_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int64_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int64_t op2, size_t vl) { + return vssub(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, uint8_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssubu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, uint16_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, uint16_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, 
uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint16m8_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, uint16_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, uint32_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint32m8_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint32_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint64m1_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint64m2_t op2, + size_t 
vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint64m4_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint64m8_t op2, + size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, uint64_t op2, size_t vl) { + return vssubu(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c @@ -0,0 +1,2587 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vaadd_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vaadd_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vaadd_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vaadd_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vaadd_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vaadd_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vaadd_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vaadd_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vaadd_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vaadd_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vaadd_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vaadd_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vaadd_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vaadd_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vaadd_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vaadd_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vaadd_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vaadd_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vaadd_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vaadd_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vaadd_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vaadd_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vaadd_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vaadd_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vaadd_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaadd_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vaadd_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vaaddu_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t 
test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vaaddu_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vaaddu_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vaaddu_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, 
vuint8m2_t op2, size_t vl) { + return vaaddu_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vaaddu_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vaaddu_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vaaddu_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return 
vaaddu_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vaaddu_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vaaddu_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vaaddu_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, 
size_t vl) {
+  return vaaddu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vaaddu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vaaddu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vaaddu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vaaddu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vaaddu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vaaddu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vaaddu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vaaddu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vaaddu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vaaddu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vaaddu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vaaddu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vaaddu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vaaddu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vaaddu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vaaddu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vaaddu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vaaddu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vaaddu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vaaddu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vaaddu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vaaddu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+                                 vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vaadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+                                 vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+                                 vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vaadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+                                 vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+                                 vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vaadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+                                 vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+                               vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vaadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+                               vint8m1_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+                               vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vaadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+                               vint8m2_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+                               vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vaadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+                               vint8m4_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+                               vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vaadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+                               vint8m8_t op1, int8_t op2, size_t vl) {
+  return vaadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                   vint16mf4_t op1, vint16mf4_t op2,
+                                   size_t vl) {
+  return vaadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                   vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vaadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                   vint16mf2_t op1, vint16mf2_t op2,
+                                   size_t vl) {
+  return vaadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                   vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vaadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                 vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vaadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                 vint16m1_t op1, int16_t op2, size_t vl) {
+  return vaadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                 vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vaadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                 vint16m2_t op1, int16_t op2, size_t vl) {
+  return vaadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                 vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vaadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                 vint16m4_t op1, int16_t op2, size_t vl) {
+  return vaadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                 vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vaadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                 vint16m8_t op1, int16_t op2, size_t vl) {
+  return vaadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                   vint32mf2_t op1, vint32mf2_t op2,
+                                   size_t vl) {
+  return vaadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                   vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vaadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+                                 vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vaadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+                                 vint32m1_t op1, int32_t op2, size_t vl) {
+  return vaadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+                                 vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vaadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+                                 vint32m2_t op1, int32_t op2, size_t vl) {
+  return vaadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+                                 vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vaadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+                                 vint32m4_t op1, int32_t op2, size_t vl) {
+  return vaadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+                                 vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vaadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+                                 vint32m8_t op1, int32_t op2, size_t vl) {
+  return vaadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+                                 vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vaadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+                                 vint64m1_t op1, int64_t op2, size_t vl) {
+  return vaadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+                                 vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vaadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+                                 vint64m2_t op1, int64_t op2,
+                                 size_t vl) {
+  return vaadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+                                 vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vaadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+                                 vint64m4_t op1, int64_t op2, size_t vl) {
+  return vaadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+                                 vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vaadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+                                 vint64m8_t op1, int64_t op2, size_t vl) {
+  return vaadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+                                   vuint8mf8_t op1, vuint8mf8_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+                                   vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vaaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+                                   vuint8mf4_t op1, vuint8mf4_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+                                   vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vaaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+                                   vuint8mf2_t op1, vuint8mf2_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+                                   vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vaaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+                                 vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vaaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+                                 vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vaaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+                                 vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vaaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+                                 vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vaaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+                                 vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vaaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+                                 vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vaaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+                                 vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vaaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+                                 vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vaaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+                                     vuint16mf4_t op1, vuint16mf4_t op2,
+                                     size_t vl) {
+  return vaaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+                                     vuint16mf4_t op1, uint16_t op2,
+                                     size_t vl) {
+  return vaaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+                                     vuint16mf2_t op1, vuint16mf2_t op2,
+                                     size_t vl) {
+  return vaaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+                                     vuint16mf2_t op1, uint16_t op2,
+                                     size_t vl) {
+  return vaaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+                                   vuint16m1_t op1, vuint16m1_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+                                   vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vaaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+                                   vuint16m2_t op1, vuint16m2_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+                                   vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vaaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+                                   vuint16m4_t op1, vuint16m4_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+                                   vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vaaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+                                   vuint16m8_t op1, vuint16m8_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+                                   vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vaaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+                                     vuint32mf2_t op1, vuint32mf2_t op2,
+                                     size_t vl) {
+  return vaaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+                                     vuint32mf2_t op1, uint32_t op2,
+                                     size_t vl) {
+  return vaaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+                                   vuint32m1_t op1, vuint32m1_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+                                   vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vaaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+                                   vuint32m2_t op1, vuint32m2_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+                                   vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vaaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+                                   vuint32m4_t op1, vuint32m4_t op2,
+                                   size_t vl) {
+  return vaaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+                                   vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vaaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64(
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint32m8_t op2, + size_t vl) { + return vaaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint32_t op2, size_t vl) { + return vaaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint64m1_t op2, + size_t vl) { + return vaaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint64_t op2, size_t vl) { + return vaaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint64m2_t op2, + size_t vl) { + return vaaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vaaddu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint64_t op2, size_t vl) { + return vaaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint64m4_t op2, + size_t vl) { + return vaaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint64_t op2, size_t vl) { + return vaaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint64m8_t op2, + size_t vl) { + return vaaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, uint64_t op2, size_t vl) { + return vaaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c new file mode 100644 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c @@ -0,0 +1,2587 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vasub_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vasub_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1,
vint8mf2_t op2, size_t vl) { + return vasub_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vasub_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vasub_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vasub_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vasub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vasub_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vasub_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vasub_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vasub_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vasub_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vasub_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vasub_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vasub_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vasub_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vasub_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vasub_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vasub_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i32( [[OP1:%.*]], 
i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vasub_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vasub_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vasub_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vasub_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vasubu_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vasubu_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vasubu_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vasubu_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vasubu_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vasubu_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vasubu_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vasubu_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vasubu_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return vasubu_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vasubu_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vasubu_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return vasubu_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vasubu_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vasubu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vasubu_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vasubu_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vasubu_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vasubu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vasubu_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vasubu_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vasubu_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vasubu_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vasubu_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vasub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vasub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vasub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vasub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vasub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vasub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vasub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vasub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, int8_t op2, size_t vl) { + return vasub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vasub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16mf4_m(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vasub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vasub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t 
maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vasub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vasub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vasub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int16_t op2, size_t vl) { + return vasub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vasub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vasub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vasub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vasub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vasub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int32_t op2, size_t vl) { + return vasub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vasub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vasub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vasub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vasub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int64_t op2, size_t vl) { + return vasub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vasubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vasubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vasubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vasubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vasubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vasubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vasubu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vasubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, uint8_t op2, size_t vl) { + return vasubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vasubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, uint16_t op2, + 
size_t vl) { + return vasubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vasubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, uint16_t op2, + size_t vl) { + return vasubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vasubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vasubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) { + return vasubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint16m8_t op2, + size_t vl) { + return vasubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, uint16_t op2, size_t vl) { + return vasubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vasubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, uint32_t op2, + size_t vl) { + return vasubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vasubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vasubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vasubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint32m8_t op2, + size_t vl) { + return vasubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vasubu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint32_t op2, size_t vl) { + return vasubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint64m1_t op2, + size_t vl) { + return vasubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint64m2_t op2, + size_t vl) { + return vasubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m2_m(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint64m4_t op2, + size_t vl) { + return vasubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint64m8_t op2, + size_t vl) { + return vasubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vasubu_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, uint64_t op2, size_t vl) { + return vasubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c @@ -0,0 +1,1793 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s 
-o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { + return vnclip_wv_i8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { + return vnclip_wv_i8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { + return vnclip_wv_i8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +//
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclip_wv_i8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclip_wv_i8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclip_wv_i8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, + size_t vl) { + return vnclip_wv_i16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, + size_t vl) { + return vnclip_wv_i16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { + return 
vnclip_wv_i16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { + return vnclip_wv_i16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { + return vnclip_wv_i16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, + size_t vl) { + return vnclip_wv_i32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { + return vnclip_wv_i32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { + return vnclip_wv_i32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { + return vnclip_wv_i32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, + size_t vl) { + return vnclipu_wv_u8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, + size_t vl) { + return vnclipu_wv_u8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, + size_t vl) { + return vnclipu_wv_u8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { + return vnclipu_wv_u8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclipu_wv_u8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { + return 
vnclipu_wx_u8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclipu_wv_u8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, + size_t vl) { + return vnclipu_wv_u16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, + size_t vl) { + return vnclipu_wv_u16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, + size_t vl) { + return vnclipu_wv_u16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, + size_t vl) { + return vnclipu_wv_u16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, + size_t vl) { + return vnclipu_wv_u16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i32.i32( 
[[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, + size_t vl) { + return vnclipu_wv_u32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, + size_t vl) { + return vnclipu_wv_u32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, + size_t vl) { + return 
vnclipu_wv_u32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, + size_t vl) { + return vnclipu_wv_u32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint16mf4_t op1, vuint8mf8_t shift, + size_t vl) { + return vnclip_wv_i8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint16mf4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint16mf2_t op1, vuint8mf4_t shift, + size_t vl) { + return vnclip_wv_i8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint16mf2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint16m1_t op1, vuint8mf2_t shift, + size_t vl) { + return vnclip_wv_i8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint16m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint16m2_t op1, 
vuint8m1_t shift, size_t vl) { + return vnclip_wv_i8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint16m2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint16m4_t op1, vuint8m2_t shift, size_t vl) { + return vnclip_wv_i8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint16m4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint16m8_t op1, vuint8m4_t shift, size_t vl) { + return vnclip_wv_i8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint16m8_t op1, size_t shift, size_t vl) { + return vnclip_wx_i8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint32mf2_t op1, vuint16mf4_t shift, + size_t vl) { + return vnclip_wv_i16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint32mf2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint32m1_t op1, vuint16mf2_t shift, + size_t vl) { + return vnclip_wv_i16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint32m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint32m2_t op1, vuint16m1_t shift, + size_t vl) { + return vnclip_wv_i16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint32m2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint32m4_t op1, vuint16m2_t shift, + size_t vl) { + return vnclip_wv_i16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint32m4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint32m8_t op1, vuint16m4_t shift, + size_t vl) { + return vnclip_wv_i16m4_m(mask, 
maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint32m8_t op1, size_t shift, size_t vl) { + return vnclip_wx_i16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint64m1_t op1, vuint32mf2_t shift, + size_t vl) { + return vnclip_wv_i32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint64m1_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint64m2_t op1, vuint32m1_t shift, + size_t vl) { + return vnclip_wv_i32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint64m2_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint64m4_t op1, vuint32m2_t shift, + size_t vl) { + return vnclip_wv_i32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint64m4_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint64m8_t op1, vuint32m4_t shift, + size_t vl) { + return vnclip_wv_i32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclip_wx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint64m8_t op1, size_t shift, size_t vl) { + return vnclip_wx_i32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint16mf4_t op1, vuint8mf8_t shift, + size_t vl) { + return vnclipu_wv_u8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint16mf4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint16mf2_t op1, vuint8mf4_t shift, + size_t vl) { + return vnclipu_wv_u8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint16mf2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint16m1_t op1, vuint8mf2_t shift, + size_t vl) { + return vnclipu_wv_u8mf2_m(mask, maskedoff, op1, shift, 
vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint16m1_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint16m2_t op1, vuint8m1_t shift, + size_t vl) { + return vnclipu_wv_u8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint16m2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint16m4_t op1, vuint8m2_t shift, + size_t vl) { + return vnclipu_wv_u8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint16m4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint16m8_t op1, vuint8m4_t shift, + size_t vl) { + return vnclipu_wv_u8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint16m8_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint32mf2_t op1, vuint16mf4_t shift, + size_t vl) { + return vnclipu_wv_u16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint32mf2_t op1, size_t shift, + size_t vl) { + return vnclipu_wx_u16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint32m1_t op1, vuint16mf2_t shift, + size_t vl) { + return vnclipu_wv_u16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint32m1_t op1, size_t shift, + size_t vl) { + return vnclipu_wx_u16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint32m2_t op1, vuint16m1_t shift, + size_t vl) { + return vnclipu_wv_u16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint32m2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint32m4_t op1, vuint16m2_t shift, + size_t vl) { + return 
vnclipu_wv_u16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint32m4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint32m8_t op1, vuint16m4_t shift, + size_t vl) { + return vnclipu_wv_u16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint32m8_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint64m1_t op1, vuint32mf2_t shift, + size_t vl) { + return vnclipu_wv_u32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint64m1_t op1, size_t shift, + size_t vl) { + return vnclipu_wx_u32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint64m2_t op1, vuint32m1_t shift, + size_t vl) { + return vnclipu_wv_u32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint64m2_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint64m4_t op1, vuint32m2_t shift, + size_t vl) { + return vnclipu_wv_u32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint64m4_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint64m8_t op1, vuint32m4_t shift, + size_t vl) { + return vnclipu_wv_u32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint64m8_t op1, size_t shift, size_t vl) { + return vnclipu_wx_u32m4_m(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c @@ -0,0 +1,2587 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsadd_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsadd_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsadd_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsadd_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsadd_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsadd_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsadd_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsadd_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsadd_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsadd_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsadd_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsadd_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsadd_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsadd_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsadd_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsadd_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsadd_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsadd_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsadd_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsadd_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsadd_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsadd_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vsaddu_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vsaddu_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vsaddu_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, 
vuint8m1_t op2, size_t vl) { + return vsaddu_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vsaddu_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsaddu_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return 
vsaddu_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vsaddu_vv_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vsaddu_vv_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, 
size_t vl) { + return vsaddu_vv_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return vsaddu_vv_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return vsaddu_vv_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, 
vuint16m8_t op2, size_t vl) { + return vsaddu_vv_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vsaddu_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vsaddu_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t 
test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vsaddu_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vsaddu_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vsaddu_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vsaddu_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vsaddu_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vsaddu_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsaddu_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vsaddu_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsaddu_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vsaddu_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vsaddu_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return 
vsadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vint8m2_t op2, 
size_t vl) { + return vsadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vsadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, 
int8_t op2, size_t vl) { + return vsadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vsadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vsadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int16_t op2, size_t vl) { + return vsadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vsadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsadd_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int32_t op2, size_t vl) { + return vsadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsadd_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int64_t op2, size_t vl) { + return vsadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vsaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vsaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vsaddu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vsaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vsaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return 
vsaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vsaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vsaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t 
maskedoff, + vuint8m8_t op1, uint8_t op2, size_t vl) { + return vsaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t op2, + size_t vl) { + return vsaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, uint16_t op2, + size_t vl) { + return vsaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t op2, + size_t vl) { + return vsaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, uint16_t op2, + size_t vl) { + return vsaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint16m1_t op2, + size_t vl) { + return vsaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint16m2_t op2, + size_t vl) { + return vsaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint16m4_t op2, + size_t vl) { + return vsaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint16m8_t op2, + size_t vl) { + return vsaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, uint16_t op2, size_t vl) { + return vsaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vsaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, uint32_t op2, + size_t vl) { + return vsaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint32m1_t op2, + size_t vl) { + return vsaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint32m2_t op2, + size_t vl) { + return vsaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint32m4_t op2, + size_t vl) { + return vsaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint32m8_t op2, + size_t vl) { + return vsaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, uint32_t op2, size_t vl) { + return vsaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint64m1_t op2, + size_t vl) { + return vsaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, uint64_t op2, 
size_t vl) { + return vsaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint64m2_t op2, + size_t vl) { + return vsaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, uint64_t op2, size_t vl) { + return vsaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint64m4_t op2, + size_t vl) { + return vsaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, uint64_t op2, size_t vl) { + return vsaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]]
+//
+vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint64m8_t op1, vuint64m8_t op2,
+                                   size_t vl) {
+  return vsaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vsaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
@@ -0,0 +1,1287 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vsmul_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vsmul_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vsmul_vv_i8mf4(op1, op2, vl);
+}
+
+// 
CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsmul_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsmul_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsmul_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsmul_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsmul_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vsmul_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vsmul_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsmul_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsmul_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// 
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsmul_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsmul_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vsmul_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vsmul_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsmul_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsmul_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsmul_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsmul_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsmul_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsmul_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsmul_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsmul_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vsmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vsmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vsmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vsmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vsmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vsmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vsmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, int8_t op2, size_t vl) { + return vsmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vsmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vsmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vsmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vsmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: 
@test_vsmul_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vsmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vsmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vsmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int16_t op2, size_t vl) { + return 
vsmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vsmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vsmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t 
test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vsmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vsmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vsmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int32_t op2, size_t vl) { + return vsmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vsmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vsmul_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int64_t op2, size_t vl) { + return vsmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c @@ -0,0 +1,1290 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i32( [[OP1:%.*]],
[[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssra_vv_i8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { + return vssra_vx_i8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssra_vv_i8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { + return vssra_vx_i8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssra_vv_i8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssra_vv_i8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssra_vv_i8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssra_vv_i8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssra_vv_i8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, + size_t vl) { + return vssra_vv_i16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { + return vssra_vx_i16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, + size_t vl) { + return vssra_vv_i16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssra_vv_i16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssra_vv_i16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssra_vv_i16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssra_vv_i16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, + size_t vl) { + return vssra_vv_i32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssra_vv_i32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssra_vv_i32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssra_vv_i32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssra_vv_i32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssra_vv_i64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssra_vv_i64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssra_vv_i64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssra_vv_i64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, size_t shift, size_t vl) { + return vssra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, size_t shift, size_t vl) { + return vssra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssra_vv_i8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssra_vv_i8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssra_vv_i8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssra_vv_i8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vuint16mf4_t shift, + size_t vl) { + return vssra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, size_t shift, size_t vl) { + return vssra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vuint16mf2_t shift, + size_t vl) { + return vssra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssra_vv_i16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssra_vv_i16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vuint16m4_t shift, size_t vl) { + return 
vssra_vv_i16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssra_vv_i16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vuint32mf2_t shift, + size_t vl) { + return vssra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssra_vv_i32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssra_vv_i32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssra_vv_i32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssra_vv_i32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssra_vv_i64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssra_vv_i64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssra_vv_i64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssra_vv_i64m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssra_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, size_t shift, size_t vl) { + return vssra_vx_i64m8_m(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c @@ -0,0 +1,1305 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { + return vssrl_vv_u8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8mf8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +//
CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { + return vssrl_vv_u8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { + return vssrl_vv_u8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssrl_vv_u8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vssrl_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssrl_vv_u8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssrl_vv_u8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssrl_vv_u8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, + size_t vl) { + return vssrl_vv_u16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16mf4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, + size_t vl) { + return vssrl_vv_u16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { + return vssrl_vv_u16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vssrl_vv_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { + return vssrl_vv_u16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { + return vssrl_vv_u16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { + return vssrl_vv_u16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, + size_t vl) { + return vssrl_vv_u32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32mf2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { + return vssrl_vv_u32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { + return vssrl_vv_u32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { + return vssrl_vv_u32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { + return vssrl_vv_u32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { + return vssrl_vv_u64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m1(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { + return vssrl_vv_u64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m2(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { + return vssrl_vv_u64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m4(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { + return vssrl_vv_u64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m8(op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t shift, + size_t vl) { + return vssrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t shift, + size_t vl) { + return vssrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t shift, + size_t vl) { + return vssrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, vuint8m1_t shift, size_t vl) { + return vssrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, + vuint8m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, vuint8m2_t shift, size_t vl) { + return vssrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, + vuint8m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m2_m(mask, 
maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, vuint8m4_t shift, size_t vl) { + return vssrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, + vuint8m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, vuint8m8_t shift, size_t vl) { + return vssrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, + vuint8m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, 
vuint16mf4_t maskedoff, + vuint16mf4_t op1, vuint16mf4_t shift, + size_t vl) { + return vssrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, + vuint16mf4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, vuint16mf2_t shift, + size_t vl) { + return vssrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, + vuint16mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, vuint16m1_t shift, + size_t vl) { + return vssrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, + vuint16m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, vuint16m2_t shift, + size_t vl) { + return vssrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, + vuint16m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, vuint16m4_t shift, + size_t vl) { + return vssrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, + vuint16m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vssrl_vv_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, vuint16m8_t shift, + size_t vl) { + return vssrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, + vuint16m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, vuint32mf2_t shift, + size_t vl) { + return vssrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vuint32mf2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, vuint32m1_t shift, + size_t vl) { + return vssrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vuint32m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, vuint32m2_t shift, + size_t vl) { + return vssrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vuint32m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, vuint32m4_t shift, + size_t vl) { + return vssrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vuint32m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m4_m(mask, maskedoff, op1, shift, 
vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, vuint32m8_t shift, + size_t vl) { + return vssrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vuint32m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, vuint64m1_t shift, + size_t vl) { + return vssrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vuint64m1_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t 
mask, vuint64m2_t maskedoff, + vuint64m2_t op1, vuint64m2_t shift, + size_t vl) { + return vssrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vuint64m2_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, vuint64m4_t shift, + size_t vl) { + return vssrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vuint64m4_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vv_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, vuint64m8_t shift, + size_t vl) { + return vssrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl); +} + +// CHECK-RV32-LABEL: @test_vssrl_vx_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vuint64m8_t op1, size_t shift, size_t vl) { + return vssrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c @@ -0,0 +1,2587 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vssub_vv_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vssub_vv_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vssub_vv_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vssub_vv_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vssub_vv_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vssub_vv_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vssub_vv_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { + return vssub_vv_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vssub_vv_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { + return vssub_vv_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vssub_vv_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vssub_vv_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vssub_vv_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vssub_vv_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { + return vssub_vv_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vssub_vv_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vssub_vv_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vssub_vv_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vssub_vv_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vssub_vv_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vssub_vv_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vssub_vv_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vssub_vv_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { + return vssubu_vv_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8mf8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { + return vssubu_vv_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8mf4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { + return vssubu_vv_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return vssubu_vv_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return vssubu_vv_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return vssubu_vv_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return vssubu_vv_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t 
+vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vssubu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vssubu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vssubu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vssubu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+// +vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, + size_t vl) { + return vssubu_vv_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32mf2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return vssubu_vv_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return vssubu_vv_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return vssubu_vv_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return vssubu_vv_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { + return vssubu_vx_u32m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return vssubu_vv_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { + return vssubu_vx_u64m1(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return vssubu_vv_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { + return vssubu_vx_u64m2(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return vssubu_vv_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { + return vssubu_vx_u64m4(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return vssubu_vv_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { + return vssubu_vx_u64m8(op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, vint8mf8_t op2, size_t vl) { + return vssub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, + vint8mf8_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, vint8mf4_t op2, size_t vl) { + return vssub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, + vint8mf4_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, vint8mf2_t op2, size_t vl) { + return vssub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, + vint8mf2_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, vint8m1_t op2, size_t vl) { + return vssub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, + vint8m1_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, vint8m2_t op2, size_t vl) { + return vssub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, + vint8m2_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, vint8m4_t op2, size_t vl) { + return vssub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, + vint8m4_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, vint8m8_t op2, size_t vl) { + return vssub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, + vint8m8_t op1, int8_t op2, size_t vl) { + return vssub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, vint16mf4_t op2, + size_t vl) { + return vssub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, + vint16mf4_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, vint16mf2_t op2, + size_t vl) { + return vssub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, + vint16mf2_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, vint16m1_t op2, size_t vl) { + return vssub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, + vint16m1_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, vint16m2_t op2, size_t vl) { + return vssub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, + vint16m2_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, vint16m4_t op2, size_t vl) { + return vssub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, + vint16m4_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, vint16m8_t op2, size_t vl) { + return vssub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, + vint16m8_t op1, int16_t op2, size_t vl) { + return vssub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, vint32mf2_t op2, + size_t vl) { + return vssub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, + vint32mf2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, vint32m1_t op2, size_t vl) { + return vssub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, + vint32m1_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m2_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, vint32m2_t op2, size_t vl) { + return vssub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, + vint32m2_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, vint32m4_t op2, size_t vl) { + return vssub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, + vint32m4_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, vint32m8_t op2, size_t vl) { + return vssub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); 
+} + +// CHECK-RV32-LABEL: @test_vssub_vx_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, + vint32m8_t op1, int32_t op2, size_t vl) { + return vssub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, vint64m1_t op2, size_t vl) { + return vssub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, + vint64m1_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, vint64m2_t op2, size_t vl) { + return vssub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, + vint64m2_t op1, int64_t op2, 
size_t vl) { + return vssub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, vint64m4_t op2, size_t vl) { + return vssub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, + vint64m4_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vv_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, vint64m8_t op2, size_t vl) { + return vssub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssub_vx_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, + vint64m8_t op1, int64_t op2, size_t vl) { + return vssub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, vuint8mf8_t op2, + size_t vl) { + return vssubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, + vuint8mf8_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, vuint8mf4_t op2, + size_t vl) { + return vssubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, + vuint8mf4_t op1, uint8_t op2, size_t vl) { + return vssubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, + vuint8mf2_t op1, vuint8mf2_t op2, + size_t vl) { + return vssubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+                                   vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vssubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+                                 vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vssubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+                                 vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vssubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+                                 vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vssubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+                                 vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vssubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+                                 vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vssubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+                                 vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vssubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+                                 vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vssubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+                                 vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vssubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+                                     vuint16mf4_t op1, vuint16mf4_t op2,
+                                     size_t vl) {
+  return vssubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+                                     vuint16mf4_t op1, uint16_t op2,
+                                     size_t vl) {
+  return vssubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+                                     vuint16mf2_t op1, vuint16mf2_t op2,
+                                     size_t vl) {
+  return vssubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+                                     vuint16mf2_t op1, uint16_t op2,
+                                     size_t vl) {
+  return vssubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+                                   vuint16m1_t op1, vuint16m1_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+                                   vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+                                   vuint16m2_t op1, vuint16m2_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+                                   vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+                                   vuint16m4_t op1, vuint16m4_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+                                   vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+                                   vuint16m8_t op1, vuint16m8_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+                                   vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vssubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+                                     vuint32mf2_t op1, vuint32mf2_t op2,
+                                     size_t vl) {
+  return vssubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+                                     vuint32mf2_t op1, uint32_t op2,
+                                     size_t vl) {
+  return vssubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+                                   vuint32m1_t op1, vuint32m1_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+                                   vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vssubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+                                   vuint32m2_t op1, vuint32m2_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+                                   vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vssubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+                                   vuint32m4_t op1, vuint32m4_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+                                   vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vssubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+                                   vuint32m8_t op1, vuint32m8_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+                                   vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vssubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+                                   vuint64m1_t op1, vuint64m1_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+                                   vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vssubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+                                   vuint64m2_t op1, vuint64m2_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+                                   vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vssubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+                                   vuint64m4_t op1, vuint64m4_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+                                   vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vssubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint64m8_t op1, vuint64m8_t op2,
+                                   size_t vl) {
+  return vssubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vssubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
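
For context on how the masked builtins exercised above are used from source code (the checks only pin down the IR lowering), here is a minimal illustrative sketch. It is not part of the patch: it assumes the unprefixed rvv-intrinsic naming these tests target (vsetvl/vle8/vse8/vmsgtu/vssubu) and a toolchain built with -target-feature +experimental-v, matching the RUN lines; the function and variable names are hypothetical.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// out[i] = a[i] - b[i] (saturating, unsigned) where a[i] > b[i]; else a[i].
void masked_ssubu_u8(const uint8_t *a, const uint8_t *b, uint8_t *out,
                     size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e8m1(n - i);              // lanes handled this pass
    vuint8m1_t va = vle8_v_u8m1(a + i, vl);      // load both operands
    vuint8m1_t vb = vle8_v_u8m1(b + i, vl);
    vbool8_t m = vmsgtu_vv_u8m1_b8(va, vb, vl);  // active where a > b
    // Same argument order as the tested signature: (mask, maskedoff, op1,
    // op2, vl); inactive lanes are taken from the maskedoff operand (va).
    vuint8m1_t vr = vssubu_vv_u8m1_m(m, va, va, vb, vl);
    vse8_v_u8m1(out + i, vr, vl);                // store the merged result
    i += vl;
  }
}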