diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -470,14 +470,26 @@
 }
 }
 
-let UnMaskedPolicyScheme = HasPolicyOperand,
-    HasMaskedOffOperand = false,
-    IsPrototypeDefaultTU = true in {
+let UnMaskedPolicyScheme = HasPassthruOperand,
+    IsPrototypeDefaultTU = false,
+    ManualCodegen = [{
+      if (IsMasked) {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        if (PolicyAttrs == TAIL_AGNOSTIC_MASK_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+      } else {
+        if (PolicyAttrs == TAIL_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+      }
+
+      Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
+    }] in {
   multiclass RVVSlideDownBuiltinSet {
     defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
-                               [["vx","v", "vvvz"]]>;
+                               [["vx","v", "vvz"]]>;
     defm "" : RVVOutBuiltinSet<NAME, "csil",
-                               [["vx","Uv", "UvUvUvz"]]>;
+                               [["vx","Uv", "UvUvz"]]>;
   }
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
@@ -1,1273 +1,4255 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8 (vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4 (vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2 (vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
-                                  size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1 (vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
-                                  size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2 (vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
-                                  size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4 (vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
-                                  size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8 (vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4 (vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2 (vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1 (vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2 (vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4 (vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8 (vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2 (vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1 (vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2 (vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4 (vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8 (vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1 (vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2 (vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4 (vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8 (vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8 (vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4 (vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2 (vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst,
-                                   vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1 (vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst,
-                                   vuint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2 (vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst,
-                                   vuint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4 (vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst,
-                                   vuint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8 (vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
-                                       size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4 (vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
-                                       size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2 (vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint16m1_t test_vslidedown_vx_u16m1 (vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint16m2_t test_vslidedown_vx_u16m2 (vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint16m4_t test_vslidedown_vx_u16m4 (vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint16m8_t test_vslidedown_vx_u16m8 (vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
-                                       size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2 (vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint32m1_t test_vslidedown_vx_u32m1 (vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint32m2_t test_vslidedown_vx_u32m2 (vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint32m4_t test_vslidedown_vx_u32m4 (vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint32m8_t test_vslidedown_vx_u32m8 (vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1 (vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2 (vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4 (vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
-                                     size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8 (vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vslidedown_vx_f16mf4 (vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vslidedown_vx_f16mf2 (vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vslidedown_vx_f16m1 (vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vslidedown_vx_f16m2 (vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vslidedown_vx_f16m4 (vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vslidedown_vx_f16m8 (vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
-                                        size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2 (vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1 (vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2 (vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4 (vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8 (vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1 (vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2 (vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4 (vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
-                                      size_t offset, size_t vl) {
-  return vslidedown(dst, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8 (vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
-                                      vint8mf8_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
-                                      vint8mf4_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
-                                      vint8mf2_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
-                                    size_t offset, size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
-                                        vint16mf4_t src, size_t offset,
-                                        size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
-                                        vint16mf2_t src, size_t offset,
-                                        size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
-                                      vint16m1_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
-                                      vint16m2_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
-                                      vint16m4_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
-                                      vint16m8_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
-                                        vint32mf2_t src, size_t offset,
-                                        size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
-                                      vint32m1_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
-                                      vint32m2_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
-                                      vint32m4_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
-                                      vint32m8_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
-                                      vint64m1_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
-                                      vint64m2_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
-                                      vint64m4_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
-                                      vint64m8_t src, size_t offset,
-                                      size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8_m
 (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
-                                       vuint8mf8_t src, size_t offset,
-                                       size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
-                                       vuint8mf4_t src, size_t offset,
-                                       size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
-                                       vuint8mf2_t src, size_t offset,
-                                       size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
-                                     vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dst, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, - vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, - vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, - vuint8m8_t src, size_t offset, size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, - vuint16mf4_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, - vuint16mf2_t src, size_t offset, - size_t vl) { - return 
vslidedown(mask, dst, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, - vuint16m2_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, - vuint16m4_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, - vuint16m8_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, - vuint32mf2_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, - vuint32m2_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, - vuint32m4_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst, - vuint32m8_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, - vuint64m2_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, - vuint64m4_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, - vuint64m8_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); 
+} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, - vfloat32mf2_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_m (vbool64_t mask, 
vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, - vfloat32m2_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, - vfloat32m4_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, - vfloat32m8_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, - vfloat64m2_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst, - vfloat64m4_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst, - vfloat64m8_t src, size_t offset, - size_t vl) { - return vslidedown(mask, dst, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vslidedown_vx_i8mf8_tu (vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tu( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vslidedown_vx_i8mf4_tu (vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vslidedown_vx_i8mf2_tu (vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vslidedown_vx_i8m1_tu (vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslidedown_vx_i8m2_tu (vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslidedown_vx_i8m4_tu (vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslidedown_vx_i8m8_tu (vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslidedown_vx_i16mf4_tu (vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslidedown_vx_i16mf2_tu (vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslidedown_vx_i16m1_tu (vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslidedown_vx_i16m2_tu (vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslidedown_vx_i16m4_tu (vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslidedown_vx_i16m8_tu (vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tu(merge, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_tu (vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tu(merge, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_tu (vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tu(merge, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_tu (vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_ta(dest, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_tu (vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_ta(dest, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_tu (vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_ta(dest, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_tu (vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tuma( +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslidedown_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tuma(mask, merge, 
src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_tu (vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tuma( +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tuma(mask, merge, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_tu (vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tuma( +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tuma(mask, merge, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_tu (vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tumu( +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tumu(mask, merge, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tumu( +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tumu(mask, merge, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_tu (vuint8mf4_t 
maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tumu( +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tumu(mask, merge, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tama( +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslidedown_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tama(mask, dest, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tama( +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tama(mask, dest, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tama( +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tama(mask, dest, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, 
src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tamu( +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslidedown_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tamu(mask, merge, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tamu( +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslidedown_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslidedown_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslidedown_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslidedown_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslidedown_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint16m8_t test_vslidedown_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) { - return vslidedown_tamu(mask, merge, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); } -// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tamu( +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslidedown_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslidedown_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslidedown_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslidedown_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslidedown_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslidedown_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tu( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslidedown_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslidedown_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_tu (vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2_tu (vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_tu (vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_tu (vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_tu (vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vslidedown_vx_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_tamu(mask, merge, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2_tu (vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vslidedown_vx_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vslidedown_vx_f32m2_tu (vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vslidedown_vx_f32m4_tu (vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vslidedown_vx_f32m8_tu (vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vslidedown_vx_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vslidedown_vx_f64m2_tu (vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vslidedown_vx_f64m4_tu (vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vslidedown_vx_f64m8_tu (vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vslidedown_vx_i8mf8_ta (vint8mf8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vslidedown_vx_i8mf4_ta (vint8mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vslidedown_vx_i8mf2_ta (vint8mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vslidedown_vx_i8m1_ta (vint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vslidedown_vx_i8m2_ta (vint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vslidedown_vx_i8m4_ta (vint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vslidedown_vx_i8m8_ta (vint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vslidedown_vx_i16mf4_ta (vint16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vslidedown_vx_i16mf2_ta (vint16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vslidedown_vx_i16m1_ta (vint16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vslidedown_vx_i16m2_ta (vint16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vslidedown_vx_i16m4_ta (vint16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vslidedown_vx_i16m8_ta (vint16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vslidedown_vx_i32mf2_ta (vint32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vslidedown_vx_i32m1_ta (vint32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vslidedown_vx_i32m2_ta (vint32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vslidedown_vx_i32m4_ta (vint32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vslidedown_vx_i32m8_ta (vint32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vslidedown_vx_i64m1_ta (vint64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vslidedown_vx_i64m2_ta (vint64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vslidedown_vx_i64m4_ta (vint64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vslidedown_vx_i64m8_ta (vint64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vslidedown_vx_u8mf8_ta (vuint8mf8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vslidedown_vx_u8mf4_ta (vuint8mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vslidedown_vx_u8mf2_ta (vuint8mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vslidedown_vx_u8m1_ta (vuint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vslidedown_vx_u8m2_ta (vuint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vslidedown_vx_u8m4_ta (vuint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vslidedown_vx_u8m8_ta (vuint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vslidedown_vx_u16mf4_ta (vuint16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vslidedown_vx_u16mf2_ta (vuint16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vslidedown_vx_u16m1_ta (vuint16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vslidedown_vx_u16m2_ta (vuint16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vslidedown_vx_u16m4_ta (vuint16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vslidedown_vx_u16m8_ta (vuint16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vslidedown_vx_u32mf2_ta (vuint32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vslidedown_vx_u32m1_ta (vuint32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vslidedown_vx_u32m2_ta (vuint32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vslidedown_vx_u32m4_ta (vuint32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vslidedown_vx_u32m8_ta (vuint32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vslidedown_vx_u64m1_ta (vuint64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vslidedown_vx_u64m2_ta (vuint64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vslidedown_vx_u64m4_ta (vuint64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vslidedown_vx_u64m8_ta (vuint64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vslidedown_vx_f16mf4_ta (vfloat16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vslidedown_vx_f16mf2_ta (vfloat16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vslidedown_vx_f16m1_ta (vfloat16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vslidedown_vx_f16m2_ta (vfloat16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vslidedown_vx_f16m4_ta (vfloat16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vslidedown_vx_f16m8_ta (vfloat16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vslidedown_vx_f32mf2_ta (vfloat32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vslidedown_vx_f32m1_ta (vfloat32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vslidedown_vx_f32m2_ta (vfloat32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vslidedown_vx_f32m4_ta (vfloat32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vslidedown_vx_f32m8_ta (vfloat32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vslidedown_vx_f64m1_ta (vfloat64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vslidedown_vx_f64m2_ta (vfloat64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vslidedown_vx_f64m4_ta (vfloat64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vslidedown_vx_f64m8_ta (vfloat64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_ta(src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vslidedown_vx_i8mf8_tuma (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vslidedown_vx_i8mf4_tuma (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vslidedown_vx_i8mf2_tuma (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vslidedown_vx_i8m1_tuma (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vslidedown_vx_i8m2_tuma (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vslidedown_vx_i8m4_tuma (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vslidedown_vx_i8m8_tuma (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vslidedown_vx_i16mf4_tuma (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vslidedown_vx_i16mf2_tuma (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vslidedown_vx_i16m1_tuma (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vslidedown_vx_i16m2_tuma (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vslidedown_vx_i16m4_tuma (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vslidedown_vx_i16m8_tuma (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vslidedown_vx_i32mf2_tuma (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vslidedown_vx_i32m1_tuma (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vslidedown_vx_i32m2_tuma (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vslidedown_vx_i32m4_tuma (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vslidedown_vx_i32m8_tuma (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vslidedown_vx_i64m1_tuma (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vslidedown_vx_i64m2_tuma (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vslidedown_vx_i64m4_tuma (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vslidedown_vx_i64m8_tuma (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vslidedown_vx_u8mf8_tuma (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vslidedown_vx_u8mf4_tuma (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vslidedown_vx_u8mf2_tuma (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vslidedown_vx_u8m1_tuma (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vslidedown_vx_u8m2_tuma (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vslidedown_vx_u8m4_tuma (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vslidedown_vx_u8m8_tuma (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vslidedown_vx_u16mf4_tuma (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vslidedown_vx_u16mf2_tuma (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vslidedown_vx_u16m1_tuma (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vslidedown_vx_u16m2_tuma (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vslidedown_vx_u16m4_tuma (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vslidedown_vx_u16m8_tuma (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vslidedown_vx_u32mf2_tuma (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vslidedown_vx_u32m1_tuma (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vslidedown_vx_u32m2_tuma (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vslidedown_vx_u32m4_tuma (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vslidedown_vx_u32m8_tuma (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vslidedown_vx_u64m1_tuma (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vslidedown_vx_u64m2_tuma (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vslidedown_vx_u64m4_tuma (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vslidedown_vx_u64m8_tuma (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vslidedown_vx_f16mf4_tuma (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vslidedown_vx_f16mf2_tuma (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vslidedown_vx_f16m1_tuma (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vslidedown_vx_f16m2_tuma (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vslidedown_vx_f16m4_tuma (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vslidedown_vx_f16m8_tuma (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vslidedown_vx_f32mf2_tuma (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vslidedown_vx_f32m1_tuma (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vslidedown_vx_f32m2_tuma (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vslidedown_vx_f32m4_tuma (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vslidedown_vx_f32m8_tuma (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vslidedown_vx_f64m1_tuma (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vslidedown_vx_f64m2_tuma (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vslidedown_vx_f64m4_tuma (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tuma(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vslidedown_vx_f64m8_tuma (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tuma(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vslidedown_vx_i8mf8_tumu (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vslidedown_vx_i8mf4_tumu (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vslidedown_vx_i8mf2_tumu (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vslidedown_vx_i8m1_tumu (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vslidedown_vx_i8m2_tumu (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vslidedown_vx_i8m4_tumu (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vslidedown_vx_i8m8_tumu (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vslidedown_vx_i16mf4_tumu (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vslidedown_vx_i16mf2_tumu (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vslidedown_vx_i16m1_tumu (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vslidedown_vx_i16m2_tumu (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vslidedown_vx_i16m4_tumu (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vslidedown_vx_i16m8_tumu (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vslidedown_vx_i32mf2_tumu (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vslidedown_vx_i32m1_tumu (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vslidedown_vx_i32m2_tumu (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vslidedown_vx_i32m4_tumu (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vslidedown_vx_i32m8_tumu (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vslidedown_vx_i64m1_tumu (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vslidedown_vx_i64m2_tumu (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vslidedown_vx_i64m4_tumu (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vslidedown_vx_i64m8_tumu (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vslidedown_vx_u8mf8_tumu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vslidedown_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vslidedown_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vslidedown_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vslidedown_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vslidedown_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vslidedown_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vslidedown_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vslidedown_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vslidedown_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vslidedown_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vslidedown_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vslidedown_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vslidedown_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vslidedown_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vslidedown_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_tumu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vslidedown_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t
maskedoff, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslidedown_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslidedown_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslidedown_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslidedown_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslidedown_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: 
@test_vslidedown_vx_f16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_tumu (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_tumu (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_tumu (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_tumu (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vslidedown_vx_f32m1_tumu (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vslidedown_vx_f32m2_tumu (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vslidedown_vx_f32m4_tumu (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vslidedown_vx_f32m8_tumu (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vslidedown_vx_f64m1_tumu (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vslidedown_vx_f64m2_tumu (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vslidedown_vx_f64m4_tumu (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vslidedown_vx_f64m8_tumu (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vslidedown_vx_i8mf8_tama (vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vslidedown_vx_i8mf4_tama (vbool32_t mask, vint8mf4_t src, size_t 
offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vslidedown_vx_i8mf2_tama (vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vslidedown_vx_i8m1_tama (vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslidedown_vx_i8m2_tama (vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslidedown_vx_i8m4_tama (vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslidedown_vx_i8m8_tama (vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslidedown_vx_i16mf4_tama (vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslidedown_vx_i16mf2_tama (vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslidedown_vx_i16m1_tama (vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} 
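+
+// A minimal sketch (illustration only, not part of the autogenerated
+// checks) of the assumed meaning of the trailing i64 immediate in the
+// intrinsic calls above and below: bit 0 requests a tail-agnostic (vta)
+// result and bit 1 a mask-agnostic (vma) one, which is why the _tumu
+// tests pass 0, the _tamu tests pass 1, and the _tama tests pass 3
+// together with a poison passthru. The enum and helper names here are
+// hypothetical, not part of the intrinsics API.
+#if 0 // not compiled; would otherwise perturb the FileCheck output
+enum { RVV_VTA = 1, RVV_VMA = 2 };
+static inline int rvv_policy(int tail_agnostic, int mask_agnostic) {
+  // Combine the two policy bits into the immediate operand value:
+  // tumu -> 0, tamu -> 1, tama -> 3.
+  return (tail_agnostic ? RVV_VTA : 0) | (mask_agnostic ? RVV_VMA : 0);
+}
+#endif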
+ +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslidedown_vx_i16m2_tama (vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslidedown_vx_i16m4_tama (vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslidedown_vx_i16m8_tama (vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tama (vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vslidedown_vx_i32m1_tama (vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vslidedown_vx_i32m2_tama (vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vslidedown_vx_i32m4_tama (vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vslidedown_vx_i32m8_tama (vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tama( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vslidedown_vx_i64m1_tama (vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vslidedown_vx_i64m2_tama (vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vslidedown_vx_i64m4_tama (vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vslidedown_vx_i64m8_tama (vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vslidedown_vx_u8mf8_tama (vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vslidedown_vx_u8mf4_tama (vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vslidedown_vx_u8mf2_tama (vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vslidedown_vx_u8m1_tama (vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vslidedown.mask.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vslidedown_vx_u8m2_tama (vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vslidedown_vx_u8m4_tama (vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vslidedown_vx_u8m8_tama (vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslidedown_vx_u16mf4_tama (vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslidedown_vx_u16mf2_tama (vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslidedown_vx_u16m1_tama (vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslidedown_vx_u16m2_tama (vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslidedown_vx_u16m4_tama (vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( 
poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslidedown_vx_u16m8_tama (vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tama (vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslidedown_vx_u32m1_tama (vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslidedown_vx_u32m2_tama (vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslidedown_vx_u32m4_tama (vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslidedown_vx_u32m8_tama (vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslidedown_vx_u64m1_tama (vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslidedown_vx_u64m2_tama (vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslidedown_vx_u64m4_tama (vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslidedown_vx_u64m8_tama (vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_tama (vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2_tama (vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_tama (vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_tama (vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_tama (vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_tama (vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tama (vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vslidedown_vx_f32m1_tama (vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vslidedown_vx_f32m2_tama (vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vslidedown_vx_f32m4_tama (vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vslidedown_vx_f32m8_tama (vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vslidedown_vx_f64m1_tama (vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vslidedown_vx_f64m2_tama (vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vslidedown_vx_f64m4_tama (vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vslidedown_vx_f64m8_tama (vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vslidedown_vx_i8mf8_tamu (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vslidedown_vx_i8mf4_tamu (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vslidedown_vx_i8mf2_tamu (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vslidedown_vx_i8m1_tamu (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslidedown_vx_i8m2_tamu (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslidedown_vx_i8m4_tamu (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslidedown_vx_i8m8_tamu (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: 
@test_vslidedown_vx_i16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslidedown_vx_i16mf4_tamu (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslidedown_vx_i16mf2_tamu (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslidedown_vx_i16m1_tamu (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslidedown_vx_i16m2_tamu (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslidedown_vx_i16m4_tamu (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslidedown_vx_i16m8_tamu (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tamu (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vslidedown_vx_i32m1_tamu (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vslidedown_vx_i32m2_tamu (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vslidedown_vx_i32m4_tamu (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vslidedown_vx_i32m8_tamu (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vslidedown_vx_i64m1_tamu (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vslidedown_vx_i64m2_tamu (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vslidedown_vx_i64m4_tamu (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vslidedown_vx_i64m8_tamu (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t 
src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vslidedown_vx_u8mf8_tamu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vslidedown_vx_u8mf4_tamu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vslidedown_vx_u8mf2_tamu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vslidedown_vx_u8m1_tamu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vslidedown_vx_u8m2_tamu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vslidedown_vx_u8m4_tamu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vslidedown_vx_u8m8_tamu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslidedown_vx_u16mf4_tamu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslidedown_vx_u16mf2_tamu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslidedown_vx_u16m1_tamu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslidedown_vx_u16m2_tamu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslidedown_vx_u16m4_tamu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslidedown_vx_u16m8_tamu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tamu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslidedown_vx_u32m1_tamu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslidedown_vx_u32m2_tamu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslidedown_vx_u32m4_tamu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslidedown_vx_u32m8_tamu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslidedown_vx_u64m1_tamu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslidedown_vx_u64m2_tamu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslidedown_vx_u64m4_tamu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslidedown_vx_u64m8_tamu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t 
offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_tamu (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2_tamu (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_tamu (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_tamu (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_tamu (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_tamu (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tamu (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: 
@test_vslidedown_vx_f32m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vslidedown_vx_f32m1_tamu (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vslidedown_vx_f32m2_tamu (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vslidedown_vx_f32m4_tamu (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vslidedown_vx_f32m8_tamu (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vslidedown_vx_f64m1_tamu (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vslidedown_vx_f64m2_tamu (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vslidedown_vx_f64m4_tamu (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tamu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vslidedown_vx_f64m8_tamu (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown_tamu(mask, maskedoff, src, offset, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c @@ -8,1375 +8,4248 @@ // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i8mf8(dst, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8 (vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i8mf4(dst, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4 (vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i8mf2(dst, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2 (vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i8m1(dst, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1 (vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i8m2(dst, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2 (vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i8m4(dst, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4 (vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i8m8(dst, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8 (vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i16mf4(dst, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4 (vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i16mf2(dst, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2 (vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t 
test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i16m1(dst, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1 (vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i16m2(dst, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2 (vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i16m4(dst, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4 (vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i16m8(dst, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8 (vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i32mf2(dst, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2 (vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i32m1(dst, src, offset, vl); 
+vint32m1_t test_vslidedown_vx_i32m1 (vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i32m2(dst, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2 (vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i32m4(dst, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4 (vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i32m8(dst, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8 (vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i64m1(dst, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1 (vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i64m2(dst, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2 (vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m2(src, offset, vl); } // CHECK-RV64-LABEL: 
@test_vslidedown_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i64m4(dst, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4 (vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i64m8(dst, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8 (vint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u8mf8(dst, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8 (vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u8mf4(dst, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4 (vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u8mf2(dst, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2 (vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u8m1(dst, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1 (vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u8m2(dst, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2 (vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u8m4(dst, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4 (vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u8m8(dst, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8 (vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u16mf4(dst, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4 (vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 
[[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u16mf2(dst, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2 (vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u16m1(dst, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1 (vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u16m2(dst, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2 (vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u16m4(dst, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4 (vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u16m8(dst, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8 (vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t 
test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u32mf2(dst, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2 (vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u32m1(dst, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1 (vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u32m2(dst, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2 (vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u32m4(dst, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4 (vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u32m8(dst, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8 (vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src, - size_t offset, size_t vl) { - return 
vslidedown_vx_u64m1(dst, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1 (vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u64m2(dst, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2 (vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u64m4(dst, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4 (vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_u64m8(dst, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8 (vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m8(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4 (vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf4(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2 (vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf2(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1 (vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m1(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2 (vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m2(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4 (vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m4(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8 (vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_f32mf2(dst, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2 (vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src, - size_t offset, size_t vl) { - return vslidedown_vx_f32m1(dst, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1 (vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_f32m2(dst, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2 (vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t 
test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_f32m4(dst, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4 (vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_f32m8(dst, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8 (vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src, - size_t offset, size_t vl) { - return vslidedown_vx_f64m1(dst, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1 (vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m1(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_f64m2(dst, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2 (vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m2(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_f64m4(dst, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4 (vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m4(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, - size_t offset, size_t vl) { - return 
vslidedown_vx_f64m8(dst, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8 (vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m8(src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst, - vint8mf8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i8mf8_m(mask, dst, src, offset, vl); +vint8mf8_t test_vslidedown_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst, - vint8mf4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i8mf4_m(mask, dst, src, offset, vl); +vint8mf4_t test_vslidedown_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst, - vint8mf2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i8mf2_m(mask, dst, src, offset, vl); +vint8mf2_t test_vslidedown_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i8m1_m(mask, dst, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i8m2_m(mask, dst, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i8m4_m(mask, dst, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, - size_t offset, size_t vl) { - return vslidedown_vx_i8m8_m(mask, dst, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, - vint16mf4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i16mf4_m(mask, dst, src, offset, vl); +vint16mf4_t test_vslidedown_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, - vint16mf2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i16mf2_m(mask, dst, src, offset, vl); +vint16mf2_t test_vslidedown_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, - vint16m1_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i16m1_m(mask, dst, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, - vint16m2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i16m2_m(mask, dst, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, - vint16m4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i16m4_m(mask, dst, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, - vint16m8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i16m8_m(mask, dst, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, 
vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, - vint32mf2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i32mf2_m(mask, dst, src, offset, vl); +vint32mf2_t test_vslidedown_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, - vint32m1_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i32m1_m(mask, dst, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, - vint32m2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i32m2_m(mask, dst, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, - vint32m4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i32m4_m(mask, dst, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( 
[[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, - vint32m8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i32m8_m(mask, dst, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, - vint64m1_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i64m1_m(mask, dst, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, - vint64m2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i64m2_m(mask, dst, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, - vint64m4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i64m4_m(mask, dst, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, - vint64m8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_i64m8_m(mask, dst, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, - vuint8mf8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u8mf8_m(mask, dst, src, offset, vl); +vuint8mf8_t test_vslidedown_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, - vuint8mf4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u8mf4_m(mask, dst, src, offset, vl); +vuint8mf4_t test_vslidedown_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, - vuint8mf2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u8mf2_m(mask, dst, src, offset, vl); +vuint8mf2_t test_vslidedown_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, - vuint8m1_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m1_m(mask, dst, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t 
src, size_t offset, size_t vl) { + return vslidedown_vx_u8m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, - vuint8m2_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m2_m(mask, dst, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, - vuint8m4_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m4_m(mask, dst, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, - vuint8m8_t src, size_t offset, size_t vl) { - return vslidedown_vx_u8m8_m(mask, dst, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, - vuint16mf4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u16mf4_m(mask, dst, src, offset, vl); +vuint16mf4_t test_vslidedown_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 
[[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, - vuint16mf2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u16mf2_m(mask, dst, src, offset, vl); +vuint16mf2_t test_vslidedown_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, - vuint16m1_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u16m1_m(mask, dst, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, - vuint16m2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u16m2_m(mask, dst, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, - vuint16m4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u16m4_m(mask, dst, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, - vuint16m8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u16m8_m(mask, dst, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, - vuint32mf2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u32mf2_m(mask, dst, src, offset, vl); +vuint32mf2_t test_vslidedown_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, - vuint32m1_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u32m1_m(mask, dst, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, - vuint32m2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u32m2_m(mask, dst, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, - vuint32m4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u32m4_m(mask, dst, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_m (vbool8_t mask, 
vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst, - vuint32m8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u32m8_m(mask, dst, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, - vuint64m1_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u64m1_m(mask, dst, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, - vuint64m2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u64m2_m(mask, dst, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, - vuint64m4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u64m4_m(mask, dst, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, - vuint64m8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_u64m8_m(mask, dst, src, offset, vl); +vuint64m8_t test_vslidedown_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m8_m(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf4_m(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf2_m(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m1_m(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m2_m(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m4_m(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t 
offset, size_t vl) { + return vslidedown_vx_f16m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, - vfloat32mf2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_f32mf2_m(mask, dst, src, offset, vl); +vfloat32mf2_t test_vslidedown_vx_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, - vfloat32m1_t src, size_t offset, - size_t vl) { - return vslidedown_vx_f32m1_m(mask, dst, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, - vfloat32m2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_f32m2_m(mask, dst, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, - vfloat32m4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_f32m4_m(mask, dst, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, - vfloat32m8_t src, size_t offset, - size_t vl) { - return vslidedown_vx_f32m8_m(mask, dst, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, - vfloat64m1_t src, size_t offset, - size_t vl) { - return vslidedown_vx_f64m1_m(mask, dst, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, - vfloat64m2_t src, size_t offset, - size_t vl) { - return vslidedown_vx_f64m2_m(mask, dst, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst, - vfloat64m4_t src, size_t offset, - size_t vl) { - return vslidedown_vx_f64m4_m(mask, dst, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
- vfloat64m8_t src, size_t offset,
- size_t vl) {
- return vslidedown_vx_f64m8_m(mask, dst, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_f64m8_m(mask, maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vslidedown_vx_f16mf4 (vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16mf4(dest, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8_tu (vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i8mf8_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vslidedown_vx_f16mf2 (vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16mf2(dest, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4_tu (vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i8mf4_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vslidedown_vx_f16m1 (vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16m1(dest, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2_tu (vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i8mf2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vslidedown_vx_f16m2 (vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16m2(dest, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1_tu (vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i8m1_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vslidedown_vx_f16m4 (vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16m4(dest, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2_tu (vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i8m2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vslidedown_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16m8(dest, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4_tu (vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i8m4_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vslidedown_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16mf4_m(mask, dest, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8_tu (vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i8m8_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vslidedown_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16mf2_m(mask, dest, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4_tu (vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i16mf4_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vslidedown_vx_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16m1_m(mask, dest, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2_tu (vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i16mf2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vslidedown_vx_f16m2_m (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16m2_m(mask, dest, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1_tu (vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i16m1_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vslidedown_vx_f16m4_m (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16m4_m(mask, dest, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2_tu (vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i16m2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vslidedown_vx_i16m4_tu (vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i16m4_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vslidedown_vx_f16m8_m (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f16m8_m(mask, dest, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8_tu (vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i16m8_tu(maskedoff, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_i32mf2_tu(merge, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2_tu (vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i32mf2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_u32mf2_tu(merge, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1_tu (vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i32m1_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f32mf2_tu(merge, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2_tu (vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i32m2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_i32mf2_ta(dest, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4_tu (vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i32m4_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_u32mf2_ta(dest, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8_tu (vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i32m8_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f32mf2_ta(dest, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1_tu (vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i64m1_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tuma(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vslidedown_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_i32mf2_tuma(mask, merge, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2_tu (vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i64m2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tuma(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vslidedown_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_u32mf2_tuma(mask, merge, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4_tu (vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i64m4_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tuma(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vslidedown_vx_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f32mf2_tuma(mask, merge, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8_tu (vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_i64m8_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tumu(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_i32mf2_tumu(mask, merge, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8mf8_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tumu(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_u32mf2_tumu(mask, merge, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8mf4_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tumu(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f32mf2_tumu(mask, merge, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8mf2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tama(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vslidedown_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_i32mf2_tama(mask, dest, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8m1_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tama(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vslidedown_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_u32mf2_tama(mask, dest, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8m2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tama(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vslidedown_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_f32mf2_tama(mask, dest, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8m4_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tamu(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vslidedown_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_i32mf2_tamu(mask, merge, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8m8_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tamu(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vslidedown_vx_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u16mf4_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vslidedown_vx_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u16mf2_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vslidedown_vx_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u16m1_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vslidedown_vx_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u16m2_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vslidedown_vx_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u16m4_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vslidedown_vx_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u16m8_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vslidedown_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t offset, size_t vl) {
- return vslidedown_vx_u32mf2_tamu(mask, merge, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u32mf2_tu(maskedoff, src, offset, vl);
}

-// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tamu(
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tu(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vslidedown_vx_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u32m1_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vslidedown_vx_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u32m2_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vslidedown_vx_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u32m4_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vslidedown_vx_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u32m8_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vslidedown_vx_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u64m1_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vslidedown_vx_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u64m2_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vslidedown_vx_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u64m4_tu(maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tu(
+//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslidedown_vx_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m8_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_tu (vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf4_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2_tu (vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf2_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m1_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_tu (vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m2_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_tu (vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m4_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_tu (vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m8_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tu (vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_tu(maskedoff, src, offset, vl); +} + 
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vslidedown_vx_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m1_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vslidedown_vx_f32m2_tu (vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m2_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vslidedown_vx_f32m4_tu (vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m4_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vslidedown_vx_f32m8_tu (vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m8_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vslidedown_vx_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m1_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vslidedown_vx_f64m2_tu (vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m2_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vslidedown_vx_f64m4_tu (vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m4_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vslidedown_vx_f64m8_tu (vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { + return 
vslidedown_vx_f64m8_tu(maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vslidedown_vx_i8mf8_ta (vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vslidedown_vx_i8mf4_ta (vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vslidedown_vx_i8mf2_ta (vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vslidedown_vx_i8m1_ta (vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslidedown_vx_i8m2_ta (vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslidedown_vx_i8m4_ta (vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslidedown_vx_i8m8_ta (vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslidedown_vx_i16mf4_ta (vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslidedown_vx_i16mf2_ta (vint16mf2_t src, 
size_t offset, size_t vl) { + return vslidedown_vx_i16mf2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslidedown_vx_i16m1_ta (vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslidedown_vx_i16m2_ta (vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslidedown_vx_i16m4_ta (vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslidedown_vx_i16m8_ta (vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_ta (vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vslidedown_vx_i32m1_ta (vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vslidedown_vx_i32m2_ta (vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vslidedown_vx_i32m4_ta (vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t 
test_vslidedown_vx_i32m8_ta (vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vslidedown_vx_i64m1_ta (vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vslidedown_vx_i64m2_ta (vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vslidedown_vx_i64m4_ta (vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vslidedown_vx_i64m8_ta (vint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vslidedown_vx_u8mf8_ta (vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vslidedown_vx_u8mf4_ta (vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vslidedown_vx_u8mf2_ta (vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vslidedown_vx_u8m1_ta (vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vslidedown_vx_u8m2_ta (vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vslidedown_vx_u8m4_ta (vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vslidedown_vx_u8m8_ta (vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslidedown_vx_u16mf4_ta (vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslidedown_vx_u16mf2_ta (vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslidedown_vx_u16m1_ta (vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslidedown_vx_u16m2_ta (vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslidedown_vx_u16m4_ta (vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslidedown_vx_u16m8_ta (vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( poison, 
[[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_ta (vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslidedown_vx_u32m1_ta (vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslidedown_vx_u32m2_ta (vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslidedown_vx_u32m4_ta (vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslidedown_vx_u32m8_ta (vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslidedown_vx_u64m1_ta (vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslidedown_vx_u64m2_ta (vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslidedown_vx_u64m4_ta (vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslidedown_vx_u64m8_ta (vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_ta (vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2_ta (vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_ta (vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_ta (vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_ta (vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_ta (vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_ta (vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vslidedown_vx_f32m1_ta (vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vslidedown_vx_f32m2_ta (vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m2_ta(src, offset, vl); +} + +// 
CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vslidedown_vx_f32m4_ta (vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vslidedown_vx_f32m8_ta (vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vslidedown_vx_f64m1_ta (vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m1_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vslidedown_vx_f64m2_ta (vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m2_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vslidedown_vx_f64m4_ta (vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m4_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vslidedown_vx_f64m8_ta (vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m8_ta(src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vslidedown_vx_i8mf8_tuma (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vslidedown_vx_i8mf4_tuma (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vslidedown_vx_i8mf2_tuma (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vslidedown_vx_i8m1_tuma (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslidedown_vx_i8m2_tuma (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslidedown_vx_i8m4_tuma (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslidedown_vx_i8m8_tuma (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslidedown_vx_i16mf4_tuma (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslidedown_vx_i16mf2_tuma (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslidedown_vx_i16m1_tuma (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslidedown_vx_i16m2_tuma (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslidedown_vx_i16m4_tuma (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslidedown_vx_i16m8_tuma (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tuma (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vslidedown_vx_i32m1_tuma (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vslidedown_vx_i32m2_tuma (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vslidedown_vx_i32m4_tuma (vbool8_t mask, vint32m4_t maskedoff, 
vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vslidedown_vx_i32m8_tuma (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vslidedown_vx_i64m1_tuma (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vslidedown_vx_i64m2_tuma (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vslidedown_vx_i64m4_tuma (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vslidedown_vx_i64m8_tuma (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vslidedown_vx_u8mf8_tuma (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vslidedown_vx_u8mf4_tuma (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf4_tuma(mask, maskedoff, src, offset, vl); +} + +// 
CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vslidedown_vx_u8mf2_tuma (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vslidedown_vx_u8m1_tuma (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vslidedown_vx_u8m2_tuma (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vslidedown_vx_u8m4_tuma (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vslidedown_vx_u8m8_tuma (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslidedown_vx_u16mf4_tuma (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslidedown_vx_u16mf2_tuma (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslidedown_vx_u16m1_tuma (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslidedown_vx_u16m2_tuma (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslidedown_vx_u16m4_tuma (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslidedown_vx_u16m8_tuma (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tuma (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslidedown_vx_u32m1_tuma (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslidedown_vx_u32m2_tuma (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslidedown_vx_u32m4_tuma (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslidedown_vx_u32m8_tuma (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslidedown_vx_u64m1_tuma (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslidedown_vx_u64m2_tuma (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslidedown_vx_u64m4_tuma (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslidedown_vx_u64m8_tuma (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_tuma (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t 
test_vslidedown_vx_f16mf2_tuma (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_tuma (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_tuma (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_tuma (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_tuma (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tuma (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vslidedown_vx_f32m1_tuma (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vslidedown_vx_f32m2_tuma (vbool16_t mask, vfloat32m2_t maskedoff, 
vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vslidedown_vx_f32m4_tuma (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vslidedown_vx_f32m8_tuma (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vslidedown_vx_f64m1_tuma (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m1_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vslidedown_vx_f64m2_tuma (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m2_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vslidedown_vx_f64m4_tuma (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m4_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tuma( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vslidedown_vx_f64m8_tuma (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m8_tuma(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vslidedown_vx_i8mf8_tumu (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf8_tumu(mask, 
maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vslidedown_vx_i8mf4_tumu (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vslidedown_vx_i8mf2_tumu (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vslidedown_vx_i8m1_tumu (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslidedown_vx_i8m2_tumu (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslidedown_vx_i8m4_tumu (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslidedown_vx_i8m8_tumu (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslidedown_vx_i16mf4_tumu (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslidedown_vx_i16mf2_tumu (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslidedown_vx_i16m1_tumu (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslidedown_vx_i16m2_tumu (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslidedown_vx_i16m4_tumu (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslidedown_vx_i16m8_tumu (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tumu (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vslidedown_vx_i32m1_tumu (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vslidedown_vx_i32m2_tumu (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vslidedown_vx_i32m4_tumu (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vslidedown_vx_i32m8_tumu (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vslidedown_vx_i64m1_tumu (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vslidedown_vx_i64m2_tumu (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vslidedown_vx_i64m4_tumu (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vslidedown_vx_i64m8_tumu (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vslidedown_vx_u8mf8_tumu 
(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vslidedown_vx_u8mf4_tumu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vslidedown_vx_u8mf2_tumu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vslidedown_vx_u8m1_tumu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vslidedown_vx_u8m2_tumu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vslidedown_vx_u8m4_tumu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vslidedown_vx_u8m8_tumu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslidedown_vx_u16mf4_tumu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf4_tumu(mask, 
maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslidedown_vx_u16mf2_tumu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslidedown_vx_u16m1_tumu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslidedown_vx_u16m2_tumu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslidedown_vx_u16m4_tumu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslidedown_vx_u16m8_tumu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tumu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslidedown_vx_u32m1_tumu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tumu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslidedown_vx_u32m2_tumu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslidedown_vx_u32m4_tumu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslidedown_vx_u32m8_tumu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslidedown_vx_u64m1_tumu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslidedown_vx_u64m2_tumu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslidedown_vx_u64m4_tumu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslidedown_vx_u64m8_tumu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u64m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16mf2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vslidedown_vx_f16m1_tumu (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vslidedown_vx_f16m2_tumu (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vslidedown_vx_f16m4_tumu (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vslidedown_vx_f16m8_tumu (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f16m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32mf2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vslidedown_vx_f32m1_tumu (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vslidedown_vx_f32m2_tumu (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vslidedown_vx_f32m4_tumu (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vslidedown_vx_f32m8_tumu (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f32m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vslidedown_vx_f64m1_tumu (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m1_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vslidedown_vx_f64m2_tumu (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m2_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vslidedown_vx_f64m4_tumu (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m4_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vslidedown_vx_f64m8_tumu (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_f64m8_tumu(mask, maskedoff, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vslidedown_vx_i8mf8_tama (vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf8_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vslidedown_vx_i8mf4_tama (vbool32_t mask, vint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vslidedown_vx_i8mf2_tama (vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8mf2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vslidedown_vx_i8m1_tama (vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m1_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslidedown_vx_i8m2_tama (vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslidedown_vx_i8m4_tama (vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslidedown_vx_i8m8_tama (vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i8m8_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslidedown_vx_i16mf4_tama (vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslidedown_vx_i16mf2_tama (vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16mf2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslidedown_vx_i16m1_tama (vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m1_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslidedown_vx_i16m2_tama (vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslidedown_vx_i16m4_tama (vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslidedown_vx_i16m8_tama (vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i16m8_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslidedown_vx_i32mf2_tama (vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32mf2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vslidedown_vx_i32m1_tama (vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m1_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( poison, [[SRC:%.*]], i64 
[[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vslidedown_vx_i32m2_tama (vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vslidedown_vx_i32m4_tama (vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vslidedown_vx_i32m8_tama (vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i32m8_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vslidedown_vx_i64m1_tama (vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m1_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vslidedown_vx_i64m2_tama (vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vslidedown_vx_i64m4_tama (vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vslidedown_vx_i64m8_tama (vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_i64m8_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vslidedown_vx_u8mf8_tama (vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf8_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( poison, 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vslidedown_vx_u8mf4_tama (vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vslidedown_vx_u8mf2_tama (vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8mf2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vslidedown_vx_u8m1_tama (vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m1_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vslidedown_vx_u8m2_tama (vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vslidedown_vx_u8m4_tama (vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vslidedown_vx_u8m8_tama (vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u8m8_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslidedown_vx_u16mf4_tama (vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslidedown_vx_u16mf2_tama (vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16mf2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslidedown_vx_u16m1_tama (vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m1_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslidedown_vx_u16m2_tama (vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslidedown_vx_u16m4_tama (vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslidedown_vx_u16m8_tama (vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) { + return vslidedown_vx_u16m8_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslidedown_vx_u32mf2_tama (vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32mf2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslidedown_vx_u32m1_tama (vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m1_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslidedown_vx_u32m2_tama (vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m2_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tama( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslidedown_vx_u32m4_tama (vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) { + return vslidedown_vx_u32m4_tama(mask, src, offset, vl); +} + +// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tama( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m8_t test_vslidedown_vx_u32m8_tama (vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m8_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m1_t test_vslidedown_vx_u64m1_tama (vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m1_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m2_t test_vslidedown_vx_u64m2_tama (vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m2_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m4_t test_vslidedown_vx_u64m4_tama (vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m4_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m8_t test_vslidedown_vx_u64m8_tama (vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m8_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vslidedown_vx_f16mf4_tama (vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf4_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf2_t test_vslidedown_vx_f16mf2_tama (vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf2_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m1_t test_vslidedown_vx_f16m1_tama (vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m1_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m2_t test_vslidedown_vx_f16m2_tama (vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m2_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vslidedown_vx_f16m4_tama (vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m4_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m8_t test_vslidedown_vx_f16m8_tama (vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m8_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32mf2_t test_vslidedown_vx_f32mf2_tama (vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32mf2_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m1_t test_vslidedown_vx_f32m1_tama (vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m1_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m2_t test_vslidedown_vx_f32m2_tama (vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m2_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m4_t test_vslidedown_vx_f32m4_tama (vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m4_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m8_t test_vslidedown_vx_f32m8_tama (vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m8_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m1_t test_vslidedown_vx_f64m1_tama (vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m1_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m2_t test_vslidedown_vx_f64m2_tama (vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m2_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m4_t test_vslidedown_vx_f64m4_tama (vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m4_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m8_t test_vslidedown_vx_f64m8_tama (vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m8_tama(mask, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vslidedown_vx_i8mf8_tamu (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vslidedown_vx_i8mf4_tamu (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vslidedown_vx_i8mf2_tamu (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vslidedown_vx_i8m1_tamu (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vslidedown_vx_i8m2_tamu (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vslidedown_vx_i8m4_tamu (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m8_t test_vslidedown_vx_i8m8_tamu (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vslidedown_vx_i16mf4_tamu (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf2_t test_vslidedown_vx_i16mf2_tamu (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m1_t test_vslidedown_vx_i16m1_tamu (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m2_t test_vslidedown_vx_i16m2_tamu (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m4_t test_vslidedown_vx_i16m4_tamu (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m8_t test_vslidedown_vx_i16m8_tamu (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32mf2_t test_vslidedown_vx_i32mf2_tamu (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32mf2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m1_t test_vslidedown_vx_i32m1_tamu (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m2_t test_vslidedown_vx_i32m2_tamu (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m4_t test_vslidedown_vx_i32m4_tamu (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m8_t test_vslidedown_vx_i32m8_tamu (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m1_t test_vslidedown_vx_i64m1_tamu (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m2_t test_vslidedown_vx_i64m2_tamu (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m4_t test_vslidedown_vx_i64m4_tamu (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m8_t test_vslidedown_vx_i64m8_tamu (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vslidedown_vx_u8mf8_tamu (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vslidedown_vx_u8mf4_tamu (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vslidedown_vx_u8mf2_tamu (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vslidedown_vx_u8m1_tamu (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vslidedown_vx_u8m2_tamu (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m4_t test_vslidedown_vx_u8m4_tamu (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m8_t test_vslidedown_vx_u8m8_tamu (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf4_t test_vslidedown_vx_u16mf4_tamu (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf2_t test_vslidedown_vx_u16mf2_tamu (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m1_t test_vslidedown_vx_u16m1_tamu (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m2_t test_vslidedown_vx_u16m2_tamu (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m4_t test_vslidedown_vx_u16m4_tamu (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m8_t test_vslidedown_vx_u16m8_tamu (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32mf2_t test_vslidedown_vx_u32mf2_tamu (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32mf2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m1_t test_vslidedown_vx_u32m1_tamu (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m2_t test_vslidedown_vx_u32m2_tamu (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m4_t test_vslidedown_vx_u32m4_tamu (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint32m8_t test_vslidedown_vx_u32m8_tamu (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m1_t test_vslidedown_vx_u64m1_tamu (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m2_t test_vslidedown_vx_u64m2_tamu (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m4_t test_vslidedown_vx_u64m4_tamu (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m8_t test_vslidedown_vx_u64m8_tamu (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vslidedown_vx_f16mf4_tamu (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf2_t test_vslidedown_vx_f16mf2_tamu (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m1_t test_vslidedown_vx_f16m1_tamu (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m2_t test_vslidedown_vx_f16m2_tamu (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vslidedown_vx_f16m4_tamu (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m8_t test_vslidedown_vx_f16m8_tamu (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32mf2_t test_vslidedown_vx_f32mf2_tamu (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32mf2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m1_t test_vslidedown_vx_f32m1_tamu (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m2_t test_vslidedown_vx_f32m2_tamu (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m4_t test_vslidedown_vx_f32m4_tamu (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m8_t test_vslidedown_vx_f32m8_tamu (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m8_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m1_t test_vslidedown_vx_f64m1_tamu (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m1_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m2_t test_vslidedown_vx_f64m2_tamu (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m2_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat64m4_t test_vslidedown_vx_f64m4_tamu (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m4_tamu(mask, maskedoff, src, offset, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32mf2_tamu(mask, merge, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8_tamu (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m8_tamu(mask, maskedoff, src, offset, vl);
 }
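
Reader's note (not part of the diff): the two policy suffixes exercised above differ only in their passthru. _tama variants take no maskedoff operand and lower with a poison passthru and policy operand 3 (tail agnostic, mask agnostic), while _tamu variants keep maskedoff as the passthru and lower with policy operand 1 (tail agnostic, mask undisturbed), matching the CHECK lines. A minimal caller sketch follows, assuming the usual riscv_vector.h header; the wrapper names shift_tama/shift_tamu are hypothetical, but the two intrinsics are exactly the f32m1 variants tested above.

    #include <riscv_vector.h>

    /* Tail-agnostic, mask-agnostic: inactive and tail elements may hold any
       value, so no maskedoff operand is needed (IR passthru: poison, policy 3). */
    vfloat32m1_t shift_tama(vbool32_t mask, vfloat32m1_t src,
                            size_t offset, size_t vl) {
      return vslidedown_vx_f32m1_tama(mask, src, offset, vl);
    }

    /* Tail-agnostic, mask-undisturbed: inactive elements keep the values from
       maskedoff (IR passthru: maskedoff, policy 1). */
    vfloat32m1_t shift_tamu(vbool32_t mask, vfloat32m1_t maskedoff,
                            vfloat32m1_t src, size_t offset, size_t vl) {
      return vslidedown_vx_f32m1_tamu(mask, maskedoff, src, offset, vl);
    }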