diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -157,8 +157,9 @@
   // This builtin has a granted vector length parameter in the last position.
   bit HasVL = true;
 
-  // This builtin supports function overloading and has a mangled name.
-  bit HasGeneric = true;
+  // This builtin supports the non-masked function overloading API.
+  // All masked operations support the overloading API.
+  bit HasNoMaskedOverloaded = true;
 
   // Reads or writes "memory" or has other side-effects.
   bit HasSideEffects = false;
@@ -231,7 +232,7 @@
   let Name = NAME # "_v",
       IRName = "vle",
       IRNameMask = "vle_mask",
-      HasGeneric = false,
+      HasNoMaskedOverloaded = false,
       ManualCodegen = [{
         IntrinsicTypes = {ResultType, Ops[1]->getType()};
         Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
@@ -250,8 +251,7 @@
 }
 
 multiclass RVVIndexedLoad {
-  let HasGeneric = false,
-      ManualCodegen = [{
+  let ManualCodegen = [{
         IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
         Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
       }],
@@ -280,7 +280,6 @@
       IRNameMask = "vse_mask",
       HasMaskedOffOperand = false,
       PermuteOperands = [1, 0], // C/C++ Operand: (ptr, value, vl). Builtin: (value, ptr, vl)
-      HasGeneric = false,
       ManualCodegen = [{
         Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
         IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
@@ -303,7 +302,6 @@
   let HasVL = false,
       HasMask = false,
       HasSideEffects = true,
-      HasGeneric = false,
       Log2LMUL = [0],
       ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
 {
diff --git a/clang/lib/Headers/CMakeLists.txt b/clang/lib/Headers/CMakeLists.txt
--- a/clang/lib/Headers/CMakeLists.txt
+++ b/clang/lib/Headers/CMakeLists.txt
@@ -211,8 +211,6 @@
 clang_generate_header(-gen-arm-cde-header arm_cde.td arm_cde.h)
 # Generate riscv_vector.h
 clang_generate_header(-gen-riscv-vector-header riscv_vector.td riscv_vector.h)
-# Generate riscv_vector_generic.h
-clang_generate_header(-gen-riscv-vector-generic-header riscv_vector.td riscv_vector_generic.h)
 
 add_custom_target(clang-resource-headers ALL DEPENDS ${out_files})
 set_target_properties(clang-resource-headers PROPERTIES
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
@@ -8,7 +8,7 @@
 // RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
 // ASM-NOT: warning
 
-#include <riscv_vector_generic.h>
+#include <riscv_vector.h>
 
 // CHECK-RV32-LABEL: @test_vadd_vv_i8mf8(
 // CHECK-RV32-NEXT:  entry:
@@ -1253,7 +1253,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vadd_m(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m(
@@ -1267,7 +1267,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vadd_m(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m(
@@ -1281,7
+1281,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m( @@ -1295,7 +1295,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m( @@ -1309,7 +1309,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m( @@ -1323,7 +1323,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m( @@ -1337,7 +1337,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m( @@ -1351,7 +1351,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m( @@ -1365,7 +1365,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m( @@ -1379,7 +1379,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m( @@ -1393,7 +1393,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m( @@ -1407,7 +1407,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m( @@ -1421,7 +1421,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m( @@ -1435,7 +1435,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vadd_m(mask, 
maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m( @@ -1449,7 +1449,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m( @@ -1463,7 +1463,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m( @@ -1477,7 +1477,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m( @@ -1491,7 +1491,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m( @@ -1505,7 +1505,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m( @@ -1519,7 +1519,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m( @@ -1533,7 +1533,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m( @@ -1547,7 +1547,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m( @@ -1561,7 +1561,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m( @@ -1575,7 +1575,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m( @@ -1589,7 +1589,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: 
@test_vadd_vx_i16m8_m( @@ -1603,7 +1603,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m( @@ -1617,7 +1617,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m( @@ -1631,7 +1631,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m( @@ -1645,7 +1645,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m( @@ -1659,7 +1659,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m( @@ -1673,7 +1673,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m( @@ -1687,7 +1687,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m( @@ -1701,7 +1701,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m( @@ -1715,7 +1715,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m( @@ -1729,7 +1729,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m( @@ -1743,7 +1743,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m( @@ -1757,7 +1757,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_m(vbool64_t 
mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m( @@ -1771,7 +1771,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m( @@ -1785,7 +1785,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m( @@ -1799,7 +1799,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m( @@ -1813,7 +1813,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m( @@ -1827,7 +1827,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m( @@ -1841,7 +1841,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m( @@ -1855,7 +1855,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m( @@ -1869,7 +1869,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m( @@ -1883,7 +1883,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m( @@ -1897,7 +1897,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m( @@ -1911,7 +1911,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, 
vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m( @@ -1925,7 +1925,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m( @@ -1939,7 +1939,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m( @@ -1953,7 +1953,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m( @@ -1967,7 +1967,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m( @@ -1981,7 +1981,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m( @@ -1995,7 +1995,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m( @@ -2009,7 +2009,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m( @@ -2023,7 +2023,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m( @@ -2037,7 +2037,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m( @@ -2051,7 +2051,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m( @@ -2065,7 +2065,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m( @@ -2079,7 +2079,7 @@ // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m( @@ -2093,7 +2093,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m( @@ -2107,7 +2107,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m( @@ -2121,7 +2121,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m( @@ -2135,7 +2135,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m( @@ -2149,7 +2149,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m( @@ -2163,7 +2163,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m( @@ -2177,7 +2177,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m( @@ -2191,7 +2191,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m( @@ -2205,7 +2205,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m( @@ -2219,7 +2219,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m( @@ -2233,7 +2233,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m( @@ -2247,7 +2247,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m( @@ -2261,7 +2261,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m( @@ -2275,7 +2275,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m( @@ -2289,7 +2289,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m( @@ -2303,7 +2303,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m( @@ -2317,7 +2317,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m( @@ -2331,7 +2331,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m( @@ -2345,7 +2345,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m( @@ -2359,7 +2359,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m( @@ -2373,7 +2373,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd_m(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m( @@ -2387,7 +2387,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, 
uint64_t op2, size_t vl) {
-  return vadd_m(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m(
@@ -2401,7 +2401,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vadd_m(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m(
@@ -2415,7 +2415,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vadd_m(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m(
@@ -2429,7 +2429,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vadd_m(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m(
@@ -2443,7 +2443,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vadd_m(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m(
@@ -2457,7 +2457,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vadd_m(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m(
@@ -2471,6 +2471,6 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vadd_m(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
@@ -8,7 +8,7 @@
 // RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
 // ASM-NOT: warning
 
-#include <riscv_vector_generic.h>
+#include <riscv_vector.h>
 
 // CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2(
 // CHECK-RV32-NEXT:  entry:
@@ -273,7 +273,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfadd_m(mask, maskedoff, op1, op2, vl);
+  return vfadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m(
@@ -287,7 +287,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfadd_m(mask, maskedoff, op1, op2, vl);
+  return vfadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m(
@@ -301,7 +301,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vfadd_m(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m( @@ -315,7 +315,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m( @@ -329,7 +329,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m( @@ -343,7 +343,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m( @@ -357,7 +357,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m( @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m( @@ -385,7 +385,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m( @@ -399,7 +399,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m( @@ -413,7 +413,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m( @@ -427,7 +427,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m( @@ -441,7 +441,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m( @@ -455,7 +455,7 @@ // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfadd_m(mask, maskedoff, op1, op2, vl); + return vfadd(mask, maskedoff, op1, op2, vl); } // CHECK-RV32-LABEL: 
@test_vfadd_vv_f64m4_m(
@@ -469,7 +469,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vfadd_m(mask, maskedoff, op1, op2, vl);
+  return vfadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m(
@@ -483,7 +483,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfadd_m(mask, maskedoff, op1, op2, vl);
+  return vfadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m(
@@ -497,7 +497,7 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vfadd_m(mask, maskedoff, op1, op2, vl);
+  return vfadd(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m(
@@ -511,6 +511,6 @@
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfadd_m(mask, maskedoff, op1, op2, vl);
+  return vfadd(mask, maskedoff, op1, op2, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c
@@ -0,0 +1,859 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT:    ret [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
+  return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* 
[[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8m8_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, 
size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// 
+vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { + return vle8(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { + return vle16(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { + 
return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { + return vle32(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]] +// +vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>* +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]] +// +vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>* +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]] +// +vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { + return vle64(mask, maskedoff, base, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c @@ -0,0 +1,6125 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>* +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]] +// +// 
CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]] +// +vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>* +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]] +// +vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>* +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]] +// +vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>* +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]] +// +vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>* +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]] +// +vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>* +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t 
vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i16m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] 
+// +vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * 
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t 
test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, 
size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: 
@test_vloxei32_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: 
@test_vloxei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, 
vuint16m4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], 
i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: 
@test_vloxei8_v_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* 
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: 
@test_vloxei64_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(*
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// 
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+  return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+  return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei16_v_f32m4(const 
float *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei8_v_f64m8(const 
double *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
<vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: 
@test_vloxei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// 
CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret 
[[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] 
to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+  return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32>
[[TMP1]] +// +vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], 
* [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: 
@test_vloxei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* 
[[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { + return vloxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei8_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { + return vloxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: 
@test_vloxei16_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei16_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { + return vloxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { + return 
vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei32_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { + return vloxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vloxei64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, 
const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
@@ -0,0 +1,6123 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]],
[[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf2( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { + return 
vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: 
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>*
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] 
+// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32m4( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, 
vuint64m1_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8m2( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret 
[[TMP1]] +// +vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t 
bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] 
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, 
maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, 
base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, 
base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, 
base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, 
maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return 
vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t 
vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t 
*base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t 
mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// 
+vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: 
@test_vluxei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// 
CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* 
[[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u32m2_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// 
CHECK-RV32-LABEL: @test_vluxei32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, 
maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t 
bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t 
*base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, 
vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t 
test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* 
[[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { + return vluxei64(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// 
CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei8_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { + return vluxei8(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei16_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { + return vluxei16(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei32_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { + return vluxei32(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV32-LABEL: @test_vluxei64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
<vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64(mask, maskedoff, base, bindex, vl);
+}
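All of the masked gathers above go through the four overloaded names vluxei8/vluxei16/vluxei32/vluxei64: the masked form is selected from the leading vbool/maskedoff arguments rather than from an _m suffix or a type-mangled name. A minimal usage sketch of that API follows; gather_f64 is a hypothetical caller, not part of this patch, and the unsuffixed vse64 overload for vfloat64m1_t is assumed to exist by analogy with the vse.c tests in the next file.

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical sketch: gather doubles through a 64-bit index vector with
// the masked overload, then store the result with the overloaded
// unit-stride store. Overload resolution is driven entirely by the
// argument types.
void gather_f64(double *dst, const double *src, vuint64m1_t bindex,
                vbool64_t mask, vfloat64m1_t maskedoff, size_t vl) {
  vfloat64m1_t v = vluxei64(mask, maskedoff, src, bindex, vl); // masked overload
  vse64(dst, v, vl); // non-masked overload, assumed f64m1 instance
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c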
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
@@ -0,0 +1,1707 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11:[0-9]+]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11:[0-9]+]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) {
+  return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) {
+  return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) {
+  return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void
test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m1( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void 
@llvm.riscv.vse.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// 
CHECK-RV64-LABEL: @test_vse64_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { + return vse8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16mf4(uint16_t *base, 
vuint16mf4_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { + return vse16(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32mf2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void 
@llvm.riscv.vse.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// 
CHECK-RV64-LABEL: @test_vse32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) { + return vse32(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) { + return vse64(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m8_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { + return vse32(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { + return vse32(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { + return vse32(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: 
ret void +// +void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { + return vse32(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { + return vse32(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { + return vse64(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { + return vse64(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { + return vse64(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { + return vse64(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], 
i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { + return vse8(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m1_m( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) { + return vse16(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] +// CHECK-RV64-NEXT: ret void 
+//
+void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) {
+  return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) {
+  return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) {
+  return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) {
+  return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv1f32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv2f32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv4f32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv8f32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv16f32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) {
+  return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv1f64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) {
+  return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv2f64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) {
+  return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv4f64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) {
+  return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.mask.nxv8f64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
+  return vse64(mask, base, value, vl);
+}
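The tests above exercise the overloaded masked stores end to end: a single spelling, vse32/vse64, resolves to the correct _m intrinsic purely from the argument types. For orientation, a reviewer's sketch (not part of the patch) of how the overloaded names compose in user code; the loop shape and the names add_one_masked/dst/src/n are illustrative. Note the plain unit-stride load keeps its suffixed name, since vle sets HasNoMaskedOverloaded = false:

#include <riscv_vector.h>

// Illustrative strip-mined loop: add 1 to n int32 elements, storing only
// the elements selected by the mask.
void add_one_masked(int32_t *dst, const int32_t *src, vbool32_t mask, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = vsetvl_e32m1(n);                   // elements handled this iteration
    vint32m1_t v = vle32_v_i32m1(src, vl);  // non-masked load: no overload
    v = vadd(v, 1, vl);                     // resolves to vadd_vx_i32m1
    vse32(mask, dst, v, vl);                // resolves to vse32_v_i32m1_m
  }
}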
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -6,9 +6,9 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This tablegen backend is responsible for emitting riscv_vector.h and
-// riscv_vector_generic.h, which includes a declaration and definition of each
-// intrinsic fucntions specified in https://github.com/riscv/rvv-intrinsic-doc.
+// This tablegen backend is responsible for emitting riscv_vector.h, which
+// includes a declaration and definition of each intrinsic function specified
+// in https://github.com/riscv/rvv-intrinsic-doc.
 //
 // See also the documentation in include/clang/Basic/riscv_vector.td.
 //
@@ -150,9 +150,10 @@
   std::string MangledName;
   std::string IRName;
   bool HasSideEffects;
+  bool IsMask;
   bool HasMaskedOffOperand;
   bool HasVL;
-  bool HasGeneric;
+  bool HasNoMaskedOverloaded;
   bool HasAutoDef; // There is automiatic definition in header
   std::string ManualCodegen;
   RVVTypePtr OutputType; // Builtin output type
@@ -168,7 +169,7 @@
 public:
   RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
                StringRef IRName, bool HasSideEffects, bool IsMask,
-               bool HasMaskedOffOperand, bool HasVL, bool HasGeneric,
+               bool HasMaskedOffOperand, bool HasVL, bool HasNoMaskedOverloaded,
                bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
                const std::vector<int64_t> &PermuteOperands);
@@ -179,9 +180,10 @@
   bool hasSideEffects() const { return HasSideEffects; }
   bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
   bool hasVL() const { return HasVL; }
-  bool hasGeneric() const { return HasGeneric; }
+  bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
   bool hasManualCodegen() const { return !ManualCodegen.empty(); }
   bool hasAutoDef() const { return HasAutoDef; }
+  bool isMask() const { return IsMask; }
   size_t getNumOperand() const { return InputTypes.size(); }
   StringRef getIRName() const { return IRName; }
   uint8_t getRISCVExtensions() const { return RISCVExtensions; }
@@ -214,9 +216,6 @@
   /// Emit riscv_vector.h
   void createHeader(raw_ostream &o);
 
-  /// Emit riscv_generic.h
-  void createGenericHeader(raw_ostream &o);
-
   /// Emit all the __builtin prototypes and code needed by Sema.
   void createBuiltins(raw_ostream &o);
@@ -236,7 +235,8 @@
                                   ArrayRef<std::string> PrototypeSeq);
   Optional<RVVTypePtr> computeType(BasicType BT, int Log2LMUL, StringRef Proto);
 
-  /// Emit Acrh predecessor definitions and body
+  /// Emit arch predecessor definitions and body; assumes the elements of Defs
+  /// are sorted by extension.
   void emitArchMacroAndBody(
       std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &o,
       std::function<void(raw_ostream &, const RVVIntrinsic &)>);
@@ -694,13 +694,13 @@
                            StringRef NewMangledName, StringRef IRName,
                            bool HasSideEffects, bool IsMask,
                            bool HasMaskedOffOperand, bool HasVL,
-                           bool HasGeneric, bool HasAutoDef,
+                           bool HasNoMaskedOverloaded, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
                            const std::vector<int64_t> &PermuteOperands)
-    : IRName(IRName), HasSideEffects(HasSideEffects),
+    : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
-      HasGeneric(HasGeneric), HasAutoDef(HasAutoDef),
+      HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
       ManualCodegen(ManualCodegen.str()) {
 
   // Init Name and MangledName
@@ -713,7 +713,6 @@
     Name += "_" + Suffix.str();
   if (IsMask) {
     Name += "_m";
-    MangledName += "_m";
   }
   // Init RISC-V extensions
   for (const auto &T : OutInTypes) {
@@ -934,30 +933,35 @@
   }
   OS << "#endif\n\n";
 
+  // Group intrinsics with the same extension under the same arch guard macro.
+  std::stable_sort(Defs.begin(), Defs.end(),
+                   [](const std::unique_ptr<RVVIntrinsic> &A,
+                      const std::unique_ptr<RVVIntrinsic> &B) {
+                     return A->getRISCVExtensions() < B->getRISCVExtensions();
+                   });
+
   // Print intrinsic functions with macro
   emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
     Inst.emitIntrinsicMacro(OS);
   });
 
-  OS << "\n#ifdef __cplusplus\n";
-  OS << "}\n";
-  OS << "#endif // __riscv_vector\n";
-  OS << "#endif // __RISCV_VECTOR_H\n";
-}
+  OS << "#define __riscv_v_intrinsic_overloading 1\n";
 
-void RVVEmitter::createGenericHeader(raw_ostream &OS) {
-  std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
-  createRVVIntrinsics(Defs);
+  // Print overloaded APIs
+  OS << "#define __rvv_overloaded static inline "
+        "__attribute__((__always_inline__, __nodebug__, __overloadable__))\n";
 
-  OS << "#include <riscv_vector.h>\n\n";
-  // Print intrinsic functions macro
   emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
-    if (!Inst.hasGeneric())
+    if (!Inst.isMask() && !Inst.hasNoMaskedOverloaded())
       return;
-    OS << "static inline __attribute__((__always_inline__, __nodebug__, "
-          "__overloadable__))\n";
+    OS << "__rvv_overloaded ";
     Inst.emitMangledFuncDef(OS);
   });
+
+  OS << "\n#ifdef __cplusplus\n";
+  OS << "}\n";
+  OS << "#endif // __riscv_vector\n";
+  OS << "#endif // __RISCV_VECTOR_H\n";
 }
 
 void RVVEmitter::createBuiltins(raw_ostream &OS) {
@@ -1041,7 +1045,7 @@
   bool HasMask = R->getValueAsBit("HasMask");
   bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
   bool HasVL = R->getValueAsBit("HasVL");
-  bool HasGeneric = R->getValueAsBit("HasGeneric");
+  bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
   bool HasSideEffects = R->getValueAsBit("HasSideEffects");
   std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
   StringRef ManualCodegen = R->getValueAsString("ManualCodegen");
@@ -1092,18 +1096,18 @@
     // Create a non-mask intrinsic
     Out.push_back(std::make_unique<RVVIntrinsic>(
         Name, SuffixStr, MangledName, IRName, HasSideEffects,
-        /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasGeneric,
-        HasAutoDef, ManualCodegen, Types.getValue(), IntrinsicTypes,
-        PermuteOperands));
+        /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
+        HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
+        IntrinsicTypes, PermuteOperands));
     if (HasMask) {
       // Create a mask intrinsic
       Optional<RVVTypes> MaskTypes = computeTypes(I, Log2LMUL, ProtoMaskSeq);
       Out.push_back(std::make_unique<RVVIntrinsic>(
           Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
-          /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasGeneric,
-          HasAutoDef, ManualCodegenMask, MaskTypes.getValue(),
-          IntrinsicTypes, PermuteOperands));
+          /*IsMask=*/true, HasMaskedOffOperand, HasVL,
+          HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
+          MaskTypes.getValue(), IntrinsicTypes, PermuteOperands));
     }
   } // end for Log2LMULList
 } // end for TypeRange
@@ -1148,13 +1152,6 @@
 void RVVEmitter::emitArchMacroAndBody(
     std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &OS,
    std::function<void(raw_ostream &, const RVVIntrinsic &)> PrintBody) {
-
-  // The same extension include in the same arch guard marco.
-  std::stable_sort(Defs.begin(), Defs.end(),
-                   [](const std::unique_ptr<RVVIntrinsic> &A,
-                      const std::unique_ptr<RVVIntrinsic> &B) {
-                     return A->getRISCVExtensions() < B->getRISCVExtensions();
-                   });
   uint8_t PrevExt = (*Defs.begin())->getRISCVExtensions();
   bool NeedEndif = emitExtDefStr(PrevExt, OS);
   for (auto &Def : Defs) {
@@ -1192,10 +1189,6 @@
   RVVEmitter(Records).createHeader(OS);
 }
 
-void EmitRVVGenericHeader(RecordKeeper &Records, raw_ostream &OS) {
-  RVVEmitter(Records).createGenericHeader(OS);
-}
-
 void EmitRVVBuiltins(RecordKeeper &Records, raw_ostream &OS) {
   RVVEmitter(Records).createBuiltins(OS);
 }
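With these emitter changes, createHeader() now also emits the overloaded definitions that previously lived in riscv_vector_generic.h: every masked intrinsic, plus every non-masked one that does not set HasNoMaskedOverloaded, gets an __rvv_overloaded thunk. Roughly, the generated header takes the shape sketched below; this is a hand-reconstructed illustration, and the real body printed by emitMangledFuncDef() forwards to the underlying __builtin_rvv entry point rather than to the suffixed intrinsic shown here:

/* Emitted once near the top of the generated riscv_vector.h: */
#define __riscv_v_intrinsic_overloading 1
#define __rvv_overloaded static inline __attribute__((__always_inline__, __nodebug__, __overloadable__))

/* Then one overloaded definition per qualifying intrinsic, e.g.: */
__rvv_overloaded void vse32(vbool32_t mask, uint32_t *base, vuint32m1_t value,
                            size_t vl) {
  return vse32_v_u32m1_m(mask, base, value, vl);
}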
diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp
--- a/clang/utils/TableGen/TableGen.cpp
+++ b/clang/utils/TableGen/TableGen.cpp
@@ -84,7 +84,6 @@
   GenArmCdeBuiltinCG,
   GenArmCdeBuiltinAliases,
   GenRISCVVectorHeader,
-  GenRISCVVectorGenericHeader,
   GenRISCVVectorBuiltins,
   GenRISCVVectorBuiltinCG,
   GenAttrDocs,
@@ -234,9 +233,6 @@
                    "Generate list of valid ARM CDE builtin aliases for clang"),
         clEnumValN(GenRISCVVectorHeader, "gen-riscv-vector-header",
                    "Generate riscv_vector.h for clang"),
-        clEnumValN(GenRISCVVectorGenericHeader,
-                   "gen-riscv-vector-generic-header",
-                   "Generate riscv_vector_generic.h for clang"),
         clEnumValN(GenRISCVVectorBuiltins, "gen-riscv-vector-builtins",
                    "Generate riscv_vector_builtins.inc for clang"),
         clEnumValN(GenRISCVVectorBuiltinCG, "gen-riscv-vector-builtin-codegen",
@@ -444,9 +440,6 @@
   case GenRISCVVectorHeader:
     EmitRVVHeader(Records, OS);
     break;
-  case GenRISCVVectorGenericHeader:
-    EmitRVVGenericHeader(Records, OS);
-    break;
   case GenRISCVVectorBuiltins:
     EmitRVVBuiltins(Records, OS);
     break;
diff --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h
--- a/clang/utils/TableGen/TableGenBackends.h
+++ b/clang/utils/TableGen/TableGenBackends.h
@@ -107,7 +107,6 @@
 void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 
 void EmitRVVHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
-void EmitRVVGenericHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitRVVBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
 void EmitRVVBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
diff --git a/llvm/docs/CommandGuide/tblgen.rst b/llvm/docs/CommandGuide/tblgen.rst
--- a/llvm/docs/CommandGuide/tblgen.rst
+++ b/llvm/docs/CommandGuide/tblgen.rst
@@ -545,10 +545,6 @@
 
   Generate ``riscv_vector.h`` for Clang.
 
-.. option:: -gen-riscv-vector-generic-header
-
-  Generate ``riscv_vector_generic.h`` for Clang.
-
 .. option:: -gen-riscv-vector-builtins
 
   Generate ``riscv_vector_builtins.inc`` for Clang.
diff --git a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
--- a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
@@ -50,13 +50,6 @@
   output_name = "riscv_vector.h"
 }
 
-# Generate riscv_vector_generic.h
-clang_tablegen("riscv_vector_generic") {
-  args = [ "-gen-riscv-vector-generic-header" ]
-  td_file = "//clang/include/clang/Basic/riscv_vector.td"
-  output_name = "riscv_vector_generic.h"
-}
-
 copy("tablegen_headers") {
   visibility = [ ":Headers" ]
   deps = [
@@ -67,7 +60,6 @@
     ":arm_neon",
     ":arm_sve",
     ":riscv_vector",
-    ":riscv_vector_generic",
   ]
   sources = []
   foreach(dep, deps) {
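A closing note on the mechanism, since it is easy to miss in the diff: __overloadable__ is Clang's extension for function overloading in C, and it is what lets the renamed tests call vadd and vse32 from plain C with many different argument types. A minimal, self-contained illustration, independent of the RVV header (the function name describe is invented for the example; compiles with clang alone):

#include <stdio.h>

// Clang's overloadable attribute gives C functions C++-style overload
// resolution; every overload in the set must carry the attribute.
static inline __attribute__((__always_inline__, __overloadable__))
void describe(int x) { printf("int: %d\n", x); }

static inline __attribute__((__always_inline__, __overloadable__))
void describe(double x) { printf("double: %f\n", x); }

int main(void) {
  describe(42);  // selects the int overload
  describe(3.5); // selects the double overload
  return 0;
}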