diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -595,6 +595,7 @@
   ManualCodegen = [{
       IntrinsicTypes = {ResultType, Ops[1]->getType()};
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
     }],
   ManualCodegenMask= [{
       // Move mask to right before vl.
@@ -628,6 +629,7 @@
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
       Value *NewVL = Ops[1];
       Ops.erase(Ops.begin() + 1);
+      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
       llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
@@ -677,6 +679,7 @@
   ManualCodegen = [{
       IntrinsicTypes = {ResultType, Ops[2]->getType()};
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
     }],
   ManualCodegenMask= [{
       // Move mask to right before vl.
@@ -698,6 +701,7 @@
   let ManualCodegen = [{
       IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+      Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
     }],
   ManualCodegenMask = [{
       // Move mask to right before vl.
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -18,7 +18,7 @@
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -28,7 +28,7 @@
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -38,7 +38,7 @@
 // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+//
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -58,7 +58,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -68,7 +68,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -118,7 +118,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -128,7 +128,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -138,7 +138,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -148,7 +148,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -158,7 +158,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei32_v_i8m1(const int8_t 
*base, vuint32m4_t bindex, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -218,7 +218,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -238,7 +238,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -248,7 +248,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -318,7 +318,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -328,7 +328,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -338,7 +338,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -388,7 +388,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -398,7 +398,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -418,7 +418,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -428,7 +428,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -478,7 +478,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { @@ -488,7 +488,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -498,7 +498,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ 
-508,7 +508,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -518,7 +518,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -568,7 +568,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv8i32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -578,7 +578,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { @@ -588,7 +588,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -598,7 +598,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -608,7 +608,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -628,7 +628,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -658,7 +658,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { @@ -668,7 +668,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -678,7 +678,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -688,7 +688,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -698,7 +698,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -718,7 +718,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -728,7 +728,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { @@ -748,7 +748,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -758,7 +758,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -768,7 +768,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* 
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -778,7 +778,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { @@ -788,7 +788,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -808,7 +808,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -818,7 +818,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -838,7 +838,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -848,7 +848,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { @@ -858,7 +858,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -868,7 +868,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -878,7 +878,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -898,7 +898,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -908,7 +908,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -928,7 +928,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -938,7 +938,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -948,7 +948,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -958,7 +958,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -988,7 +988,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -998,7 +998,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1018,7 +1018,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1028,7 +1028,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) 
{ @@ -1038,7 +1038,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1048,7 +1048,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1058,7 +1058,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1078,7 +1078,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1088,7 +1088,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1108,7 +1108,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1118,7 +1118,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { @@ -1128,7 +1128,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1138,7 +1138,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1148,7 +1148,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1168,7 +1168,7 @@ // CHECK-RV64-LABEL: 
@test_vloxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1178,7 +1178,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1198,7 +1198,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1209,7 +1209,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1219,7 +1219,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1229,7 +1229,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv2i32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1249,7 +1249,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1259,7 +1259,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1279,7 +1279,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1289,7 +1289,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1299,7 +1299,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1309,7 +1309,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { @@ -1319,7 +1319,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1349,7 +1349,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64( undef, * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -1369,7 +1369,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1379,7 +1379,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1389,7 +1389,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1399,7 +1399,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1409,7 +1409,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1429,7 +1429,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to 
* -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1439,7 +1439,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1459,7 +1459,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1469,7 +1469,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1479,7 +1479,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { @@ -1489,7 +1489,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t 
test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1499,7 +1499,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1519,7 +1519,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { @@ -1529,7 +1529,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1549,7 +1549,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1559,7 +1559,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { @@ -1569,7 +1569,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1579,7 +1579,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1589,7 +1589,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { @@ -1609,7 +1609,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { @@ -1619,7 +1619,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t 
bindex, size_t vl) { @@ -1629,7 +1629,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1639,7 +1639,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -1649,7 +1649,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { @@ -1659,7 +1659,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { @@ -1669,7 +1669,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1679,7 +1679,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { @@ -1689,7 +1689,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -1699,7 +1699,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { @@ -1709,7 +1709,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { @@ -1719,7 +1719,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -1729,7 +1729,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { @@ -1739,7 +1739,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -1749,7 +1749,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { @@ -1759,7 +1759,7 @@ // 
CHECK-RV64-LABEL: @test_vloxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1769,7 +1769,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1779,7 +1779,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1789,7 +1789,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { @@ -1799,7 +1799,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1809,7 +1809,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1819,7 +1819,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv4f64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { @@ -1829,7 +1829,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { @@ -1839,7 +1839,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1849,7 +1849,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -1859,7 +1859,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { @@ -1869,7 +1869,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { @@ -1879,7 +1879,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { @@ -1889,7 +1889,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { @@ -1909,7 +1909,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -18,7 +18,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -28,7 +28,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -38,7 +38,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] 
to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -58,7 +58,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -68,7 +68,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { @@ -78,7 +78,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ 
-108,7 +108,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -118,7 +118,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -128,7 +128,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -138,7 +138,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -148,7 +148,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -158,7 +158,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -168,7 +168,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64( 
undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -198,7 +198,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -208,7 +208,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -218,7 +218,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -228,7 +228,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -238,7 +238,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -248,7 +248,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { @@ -288,7 +288,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -298,7 +298,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t 
bindex, size_t vl) { @@ -308,7 +308,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -318,7 +318,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -328,7 +328,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -338,7 +338,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { @@ -348,7 +348,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -378,7 +378,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -388,7 +388,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -398,7 +398,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -408,7 +408,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -418,7 +418,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -428,7 +428,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -468,7 +468,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -478,7 +478,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { @@ -488,7 +488,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -498,7 +498,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -508,7 +508,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -518,7 +518,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -528,7 +528,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -558,7 +558,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -568,7 +568,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -578,7 +578,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { @@ -588,7 +588,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -598,7 +598,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -608,7 +608,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -618,7 +618,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -628,7 +628,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, 
vuint8mf8_t bindex, size_t vl) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -648,7 +648,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -658,7 +658,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { @@ -668,7 +668,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -678,7 +678,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -688,7 +688,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -698,7 +698,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { @@ -708,7 +708,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -718,7 +718,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -728,7 +728,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -738,7 +738,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { @@ -748,7 +748,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -758,7 +758,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -768,7 +768,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -778,7 +778,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { @@ -788,7 +788,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -798,7 +798,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -808,7 +808,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -818,7 +818,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -838,7 +838,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -848,7 +848,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { @@ -858,7 +858,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -868,7 +868,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -878,7 +878,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -888,7 +888,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -898,7 +898,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -908,7 +908,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -918,7 +918,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -928,7 +928,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -938,7 +938,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -948,7 +948,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -958,7 +958,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -968,7 +968,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -978,7 +978,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -988,7 +988,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -998,7 +998,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1008,7 +1008,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1018,7 +1018,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1028,7 +1028,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1038,7 +1038,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1048,7 +1048,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1058,7 +1058,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { @@ -1068,7 +1068,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1078,7 +1078,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1088,7 +1088,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1098,7 +1098,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1108,7 +1108,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1118,7 +1118,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { @@ -1128,7 +1128,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1138,7 +1138,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1148,7 +1148,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1158,7 +1158,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1168,7 +1168,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1178,7 +1178,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1188,7 +1188,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1198,7 +1198,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1208,7 +1208,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1218,7 +1218,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1228,7 +1228,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1238,7 +1238,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1248,7 +1248,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1258,7 +1258,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { @@ -1268,7 +1268,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1278,7 +1278,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1288,7 +1288,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, 
vuint16m1_t bindex, size_t vl) { @@ -1298,7 +1298,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1308,7 +1308,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { @@ -1318,7 +1318,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1328,7 +1328,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1338,7 +1338,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1348,7 +1348,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1358,7 +1358,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -1368,7 +1368,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1378,7 +1378,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1388,7 +1388,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1398,7 +1398,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1408,7 +1408,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1418,7 +1418,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1428,7 +1428,7 @@ 
// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1438,7 +1438,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -1448,7 +1448,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1458,7 +1458,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1468,7 +1468,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1478,7 +1478,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { @@ -1488,7 +1488,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv1i64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1498,7 +1498,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1508,7 +1508,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1518,7 +1518,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { @@ -1528,7 +1528,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1538,7 +1538,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1548,7 +1548,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1558,7 +1558,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { @@ -1568,7 +1568,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1578,7 +1578,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1588,7 +1588,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1598,7 +1598,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { @@ -1608,7 +1608,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { @@ -1618,7 +1618,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1628,7 +1628,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1638,7 +1638,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -1648,7 +1648,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { @@ -1658,7 +1658,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { @@ -1668,7 +1668,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1678,7 +1678,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { @@ -1688,7 +1688,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -1698,7 +1698,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { @@ -1708,7 +1708,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { @@ -1718,7 +1718,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -1728,7 +1728,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { @@ -1738,7 +1738,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -1748,7 +1748,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { @@ -1758,7 +1758,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1768,7 +1768,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1778,7 +1778,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1788,7 +1788,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { @@ -1798,7 +1798,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1808,7 +1808,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1818,7 +1818,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { @@ -1828,7 +1828,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { @@ -1838,7 +1838,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1848,7 +1848,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -1858,7 +1858,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { @@ -1868,7 +1868,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { @@ -1878,7 +1878,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei64_v_f64m1(const 
double *base, vuint64m1_t bindex, size_t vl) { @@ -1888,7 +1888,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -1898,7 +1898,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { @@ -1908,7 +1908,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8_v_i8mf8(const int8_t *base, size_t vl) { @@ -19,7 +19,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8_v_i8mf4(const int8_t *base, size_t vl) { @@ -29,7 +29,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8_v_i8mf2(const int8_t *base, size_t vl) { @@ -39,7 +39,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i8.i64( undef, 
* [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8_v_i8m1(const int8_t *base, size_t vl) { @@ -49,7 +49,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8_v_i8m2(const int8_t *base, size_t vl) { @@ -59,7 +59,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8_v_i8m4(const int8_t *base, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv64i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv64i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8_v_i8m8(const int8_t *base, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16_v_i16mf4(const int16_t *base, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16_v_i16mf2(const int16_t *base, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16_v_i16m1(const int16_t *base, size_t vl) { @@ -109,7 +109,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16_v_i16m2(const int16_t *base, size_t vl) { @@ -119,7 +119,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16_v_i16m4(const int16_t *base, size_t vl) { @@ -129,7 +129,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16_v_i16m8(const int16_t *base, size_t vl) { @@ -139,7 +139,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32_v_i32mf2(const int32_t *base, size_t vl) { @@ -149,7 +149,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32_v_i32m1(const int32_t *base, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32_v_i32m2(const int32_t *base, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32_v_i32m4(const int32_t *base, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32_v_i32m8(const int32_t *base, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64_v_i64m1(const int64_t *base, size_t vl) { @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64_v_i64m2(const int64_t *base, size_t vl) { @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64_v_i64m4(const int64_t *base, size_t vl) { @@ -219,7 +219,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64_v_i64m8(const int64_t *base, size_t vl) { @@ -229,7 +229,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *base, size_t vl) { @@ -239,7 +239,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *base, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *base, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8_v_u8m1(const uint8_t *base, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8_v_u8m2(const uint8_t *base, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_u8m4( 
// CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8_v_u8m4(const uint8_t *base, size_t vl) { @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vle8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv64i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv64i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8_v_u8m8(const uint8_t *base, size_t vl) { @@ -299,7 +299,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *base, size_t vl) { @@ -309,7 +309,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *base, size_t vl) { @@ -319,7 +319,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16_v_u16m1(const uint16_t *base, size_t vl) { @@ -329,7 +329,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16_v_u16m2(const uint16_t *base, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16_v_u16m4(const uint16_t *base, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t 
test_vle16_v_u16m8(const uint16_t *base, size_t vl) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *base, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32_v_u32m1(const uint32_t *base, size_t vl) { @@ -379,7 +379,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32_v_u32m2(const uint32_t *base, size_t vl) { @@ -389,7 +389,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32_v_u32m4(const uint32_t *base, size_t vl) { @@ -399,7 +399,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32_v_u32m8(const uint32_t *base, size_t vl) { @@ -409,7 +409,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64_v_u64m1(const uint64_t *base, size_t vl) { @@ -419,7 +419,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64_v_u64m2(const uint64_t *base, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64_v_u64m4(const uint64_t *base, size_t vl) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32_v_f32mf2(const float *base, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32_v_f32m1(const float *base, size_t vl) { @@ -469,7 +469,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32_v_f32m2(const float *base, size_t vl) { @@ -479,7 +479,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32_v_f32m4(const float *base, size_t vl) { @@ -489,7 +489,7 @@ // CHECK-RV64-LABEL: @test_vle32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32_v_f32m8(const float *base, size_t vl) { @@ -499,7 +499,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64_v_f64m1(const double *base, size_t vl) { @@ -509,7 +509,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64_v_f64m2(const double *base, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64_v_f64m4(const double *base, size_t vl) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vle64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64_v_f64m8(const double *base, size_t vl) { @@ -1069,7 +1069,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vle16_v_f16mf4(const _Float16 *base, size_t vl) { @@ -1079,7 +1079,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vle16_v_f16mf2(const _Float16 *base, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) { @@ -1099,7 +1099,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vle16_v_f16m2(const _Float16 *base, size_t vl) { @@ -1109,7 +1109,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_f16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vle16_v_f16m4(const _Float16 *base, 
size_t vl) { @@ -1119,7 +1119,7 @@ // CHECK-RV64-LABEL: @test_vle16_v_f16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vle16_v_f16m8(const _Float16 *base, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -22,7 +22,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -48,7 +48,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 
1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -74,7 +74,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -87,7 +87,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv64i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv64i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -100,7 +100,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -113,7 +113,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -139,7 +139,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store 
i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -165,7 +165,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv64i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv64i8.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -373,7 +373,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -399,7 +399,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 
[[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -425,7 +425,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -438,7 +438,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -451,7 +451,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -464,7 +464,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -477,7 +477,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store 
i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -490,7 +490,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -516,7 +516,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32i16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -685,7 +685,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -698,7 +698,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -711,7 +711,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: 
store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -724,7 +724,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -737,7 +737,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -750,7 +750,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -763,7 +763,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -776,7 +776,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: 
store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -802,7 +802,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -815,7 +815,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -828,7 +828,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -841,7 +841,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -854,7 +854,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -867,7 +867,7 @@ // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16f32.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1075,7 +1075,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1088,7 +1088,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1101,7 +1101,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1114,7 +1114,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1127,7 +1127,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1140,7 +1140,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } 
[[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1153,7 +1153,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1166,7 +1166,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1192,7 +1192,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1205,7 +1205,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1218,7 +1218,7 @@ // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f64.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1387,7 +1387,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1400,7 +1400,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1413,7 +1413,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1426,7 +1426,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1439,7 +1439,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_f16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1452,7 +1452,7 @@ // CHECK-RV64-LABEL: @test_vle16ff_v_f16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv32f16.i64( undef, * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -19,7 +19,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -29,7 +29,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -39,7 +39,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -49,7 +49,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -59,7 +59,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: 
@test_vloxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -109,7 +109,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -119,7 +119,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -129,7 +129,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -139,7 +139,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -149,7 +149,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -219,7 +219,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -229,7 +229,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -239,7 +239,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -269,7 +269,7 @@ 
// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -299,7 +299,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -309,7 +309,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -319,7 +319,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -329,7 +329,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv16i16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -379,7 +379,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -389,7 +389,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -399,7 +399,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -409,7 +409,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -419,7 +419,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -469,7 +469,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -479,7 +479,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { @@ -489,7 +489,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -499,7 +499,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -509,7 +509,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -559,7 +559,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -569,7 +569,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -579,7 +579,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { @@ -589,7 +589,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t 
bindex, size_t vl) { @@ -599,7 +599,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -619,7 +619,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv8i64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { @@ -669,7 +669,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -679,7 +679,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -689,7 +689,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { @@ -709,7 +709,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -719,7 +719,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -739,7 +739,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { @@ -749,7 +749,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -759,7 +759,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -769,7 +769,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -779,7 +779,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -799,7 +799,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -809,7 +809,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -829,7 +829,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -839,7 +839,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -849,7 +849,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { @@ -859,7 +859,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -869,7 +869,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -889,7 +889,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -899,7 +899,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -919,7 +919,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -929,7 +929,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4( // CHECK-RV64-NEXT: entry: 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -939,7 +939,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -949,7 +949,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -959,7 +959,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -979,7 +979,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -989,7 +989,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1009,7 +1009,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1019,7 +1019,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1029,7 +1029,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1039,7 +1039,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1049,7 +1049,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { @@ -1069,7 +1069,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1079,7 +1079,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1099,7 +1099,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1109,7 +1109,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1119,7 +1119,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei16_v_u16m8(const 
uint16_t *base, vuint16m8_t bindex, size_t vl) { @@ -1129,7 +1129,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1139,7 +1139,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1159,7 +1159,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1169,7 +1169,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1189,7 +1189,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* 
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1199,7 +1199,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1209,7 +1209,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1219,7 +1219,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1229,7 +1229,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1249,7 +1249,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1259,7 +1259,7 @@ 
// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1279,7 +1279,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1289,7 +1289,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1299,7 +1299,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1309,7 +1309,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { @@ -1319,7 +1319,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1349,7 +1349,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -1369,7 +1369,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1379,7 +1379,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1389,7 +1389,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1399,7 +1399,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1409,7 +1409,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1429,7 +1429,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1439,7 +1439,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64( undef, * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1459,7 +1459,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1469,7 +1469,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1479,7 +1479,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { @@ -1489,7 +1489,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1499,7 +1499,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1519,7 +1519,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { @@ -1529,7 +1529,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1549,7 +1549,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1559,7 +1559,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { @@ -1569,7 +1569,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1579,7 +1579,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1589,7 +1589,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { @@ -1609,7 +1609,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { @@ -1619,7 +1619,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1629,7 +1629,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1639,7 +1639,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -1649,7 +1649,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { @@ -1659,7 +1659,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { @@ -1669,7 +1669,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1679,7 +1679,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { @@ -1689,7 +1689,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -1699,7 +1699,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { @@ -1709,7 +1709,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei32_v_f32m8(const 
float *base, vuint32m8_t bindex, size_t vl) { @@ -1719,7 +1719,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -1729,7 +1729,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { @@ -1739,7 +1739,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -1749,7 +1749,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { @@ -1759,7 +1759,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1769,7 +1769,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1779,7 +1779,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(* [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1789,7 +1789,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { @@ -1799,7 +1799,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1809,7 +1809,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1819,7 +1819,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { @@ -1829,7 +1829,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { @@ -1839,7 +1839,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1849,7 
+1849,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -1859,7 +1859,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { @@ -1869,7 +1869,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { @@ -1879,7 +1879,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { @@ -1889,7 +1889,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { @@ -1909,7 +1909,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { @@ -3829,7 +3829,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vloxei8_v_f16mf4 (const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -3839,7 +3839,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vloxei8_v_f16mf2 (const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -3849,7 +3849,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vloxei8_v_f16m1 (const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -3859,7 +3859,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vloxei8_v_f16m2 (const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -3869,7 +3869,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vloxei8_v_f16m4 (const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -3879,7 +3879,7 @@ // CHECK-RV64-LABEL: @test_vloxei8_v_f16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vloxei8_v_f16m8 (const _Float16 *base, vuint8m4_t bindex, size_t vl) { @@ -3889,7 +3889,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4( 
// CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vloxei16_v_f16mf4 (const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -3899,7 +3899,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vloxei16_v_f16mf2 (const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -3909,7 +3909,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vloxei16_v_f16m1 (const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -3919,7 +3919,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vloxei16_v_f16m2 (const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -3929,7 +3929,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vloxei16_v_f16m4 (const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -3939,7 +3939,7 @@ // CHECK-RV64-LABEL: @test_vloxei16_v_f16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vloxei16_v_f16m8 (const _Float16 *base, vuint16m8_t bindex, size_t vl) { @@ -3949,7 +3949,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv1f16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vloxei32_v_f16mf4 (const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -3959,7 +3959,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vloxei32_v_f16mf2 (const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -3969,7 +3969,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vloxei32_v_f16m1 (const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -3979,7 +3979,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vloxei32_v_f16m2 (const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -3989,7 +3989,7 @@ // CHECK-RV64-LABEL: @test_vloxei32_v_f16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vloxei32_v_f16m4 (const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -3999,7 +3999,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vloxei64_v_f16mf4 (const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -4009,7 +4009,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vloxei64_v_f16mf2 (const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -4019,7 +4019,7 @@ // CHECK-RV64-LABEL: 
@test_vloxei64_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vloxei64_v_f16m1 (const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -4029,7 +4029,7 @@ // CHECK-RV64-LABEL: @test_vloxei64_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vloxei64_v_f16m2 (const _Float16 *base, vuint64m8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vlse8_v_i8mf8(const int8_t *base, ptrdiff_t bstride, @@ -20,7 +20,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vlse8_v_i8mf4(const int8_t *base, ptrdiff_t bstride, @@ -31,7 +31,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vlse8_v_i8mf2(const int8_t *base, ptrdiff_t bstride, @@ -42,7 +42,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vlse8_v_i8m1(const int8_t *base, ptrdiff_t bstride, size_t vl) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vlse8_v_i8m2(const int8_t *base, ptrdiff_t bstride, size_t vl) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vlse8_v_i8m4(const int8_t *base, ptrdiff_t bstride, size_t vl) { @@ -72,7 +72,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv64i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv64i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vlse8_v_i8m8(const int8_t *base, ptrdiff_t bstride, size_t vl) { @@ -82,7 +82,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vlse16_v_i16mf4(const int16_t *base, ptrdiff_t bstride, @@ -93,7 +93,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vlse16_v_i16mf2(const int16_t *base, ptrdiff_t bstride, @@ -104,7 +104,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vlse16_v_i16m1(const int16_t *base, ptrdiff_t bstride, @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vlse16_v_i16m2(const int16_t *base, ptrdiff_t bstride, @@ -126,7 +126,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i16.i64(* [[TMP0]], i64 
[[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vlse16_v_i16m4(const int16_t *base, ptrdiff_t bstride, @@ -137,7 +137,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vlse16_v_i16m8(const int16_t *base, ptrdiff_t bstride, @@ -148,7 +148,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vlse32_v_i32mf2(const int32_t *base, ptrdiff_t bstride, @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vlse32_v_i32m1(const int32_t *base, ptrdiff_t bstride, @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vlse32_v_i32m2(const int32_t *base, ptrdiff_t bstride, @@ -181,7 +181,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vlse32_v_i32m4(const int32_t *base, ptrdiff_t bstride, @@ -192,7 +192,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vlse32_v_i32m8(const int32_t *base, ptrdiff_t bstride, @@ -203,7 +203,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vlse.nxv1i64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vlse64_v_i64m1(const int64_t *base, ptrdiff_t bstride, @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vlse64_v_i64m2(const int64_t *base, ptrdiff_t bstride, @@ -225,7 +225,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vlse64_v_i64m4(const int64_t *base, ptrdiff_t bstride, @@ -236,7 +236,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vlse64_v_i64m8(const int64_t *base, ptrdiff_t bstride, @@ -247,7 +247,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vlse8_v_u8mf8(const uint8_t *base, ptrdiff_t bstride, @@ -258,7 +258,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vlse8_v_u8mf4(const uint8_t *base, ptrdiff_t bstride, @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vlse8_v_u8mf2(const uint8_t *base, ptrdiff_t bstride, @@ -280,7 +280,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vlse.nxv8i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vlse8_v_u8m1(const uint8_t *base, ptrdiff_t bstride, @@ -291,7 +291,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vlse8_v_u8m2(const uint8_t *base, ptrdiff_t bstride, @@ -302,7 +302,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vlse8_v_u8m4(const uint8_t *base, ptrdiff_t bstride, @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vlse8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv64i8.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv64i8.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vlse8_v_u8m8(const uint8_t *base, ptrdiff_t bstride, @@ -324,7 +324,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vlse16_v_u16mf4(const uint16_t *base, ptrdiff_t bstride, @@ -335,7 +335,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vlse16_v_u16mf2(const uint16_t *base, ptrdiff_t bstride, @@ -346,7 +346,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vlse16_v_u16m1(const uint16_t *base, ptrdiff_t bstride, @@ -357,7 +357,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vlse.nxv8i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vlse16_v_u16m2(const uint16_t *base, ptrdiff_t bstride, @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vlse16_v_u16m4(const uint16_t *base, ptrdiff_t bstride, @@ -379,7 +379,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32i16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32i16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vlse16_v_u16m8(const uint16_t *base, ptrdiff_t bstride, @@ -390,7 +390,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vlse32_v_u32mf2(const uint32_t *base, ptrdiff_t bstride, @@ -401,7 +401,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vlse32_v_u32m1(const uint32_t *base, ptrdiff_t bstride, @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vlse32_v_u32m2(const uint32_t *base, ptrdiff_t bstride, @@ -423,7 +423,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vlse32_v_u32m4(const uint32_t *base, ptrdiff_t bstride, @@ -434,7 +434,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16i32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vlse32_v_u32m8(const uint32_t *base, ptrdiff_t bstride, @@ -445,7 +445,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1i64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vlse64_v_u64m1(const uint64_t *base, ptrdiff_t bstride, @@ -456,7 +456,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2i64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vlse64_v_u64m2(const uint64_t *base, ptrdiff_t bstride, @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4i64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vlse64_v_u64m4(const uint64_t *base, ptrdiff_t bstride, @@ -478,7 +478,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8i64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vlse64_v_u64m8(const uint64_t *base, ptrdiff_t bstride, @@ -489,7 +489,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1f32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1f32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vlse32_v_f32mf2(const float *base, ptrdiff_t bstride, @@ -500,7 +500,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2f32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2f32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vlse32_v_f32m1(const float *base, ptrdiff_t bstride, @@ -511,7 +511,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4f32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4f32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vlse32_v_f32m2(const float *base, ptrdiff_t bstride, @@ -522,7 +522,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8f32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8f32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vlse32_v_f32m4(const float *base, ptrdiff_t bstride, @@ -533,7 +533,7 @@ // CHECK-RV64-LABEL: @test_vlse32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16f32.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16f32.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vlse32_v_f32m8(const float *base, ptrdiff_t bstride, @@ -544,7 +544,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1f64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1f64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vlse64_v_f64m1(const double *base, ptrdiff_t bstride, @@ -555,7 +555,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2f64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2f64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vlse64_v_f64m2(const double *base, ptrdiff_t bstride, @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4f64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4f64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vlse64_v_f64m4(const double *base, ptrdiff_t bstride, @@ -577,7 +577,7 @@ // CHECK-RV64-LABEL: @test_vlse64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8f64.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8f64.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vlse64_v_f64m8(const double *base, ptrdiff_t bstride, @@ -1224,7 +1224,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_f16mf4( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1f16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vlse16_v_f16mf4 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { @@ -1234,7 +1234,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2f16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vlse16_v_f16mf2 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { @@ -1244,7 +1244,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4f16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vlse16_v_f16m1 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { @@ -1254,7 +1254,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8f16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vlse16_v_f16m2 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { @@ -1264,7 +1264,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_f16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16f16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vlse16_v_f16m4 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { @@ -1274,7 +1274,7 @@ // CHECK-RV64-LABEL: @test_vlse16_v_f16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32f16.i64( undef, * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vlse16_v_f16m8 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c @@ -9,7 +9,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -19,7 +19,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -29,7 +29,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -39,7 +39,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -49,7 +49,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -59,7 +59,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -69,7 +69,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -99,7 +99,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -109,7 +109,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -119,7 +119,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -129,7 +129,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -139,7 +139,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t 
test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -149,7 +149,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -159,7 +159,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -189,7 +189,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -199,7 +199,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -209,7 +209,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -219,7 +219,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -229,7 +229,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -239,7 +239,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -249,7 +249,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -279,7 +279,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { @@ -289,7 +289,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -299,7 +299,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -309,7 +309,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -319,7 +319,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -329,7 +329,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -339,7 +339,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -369,7 +369,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -379,7 +379,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -389,7 +389,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -399,7 +399,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -409,7 +409,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -419,7 +419,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -429,7 +429,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -459,7 +459,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -469,7 +469,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, 
size_t vl) { @@ -479,7 +479,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { @@ -489,7 +489,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -499,7 +499,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -509,7 +509,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -519,7 +519,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -549,7 +549,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -559,7 +559,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -569,7 +569,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -579,7 +579,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { @@ -589,7 +589,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -599,7 +599,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -609,7 +609,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -619,7 +619,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -639,7 +639,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -649,7 +649,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -659,7 +659,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { @@ -669,7 +669,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP1]] // vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -679,7 +679,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -689,7 +689,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -699,7 +699,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { @@ -709,7 +709,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -719,7 +719,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -729,7 +729,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -739,7 +739,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { @@ -749,7 +749,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -759,7 +759,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -769,7 +769,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -779,7 +779,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { @@ -789,7 +789,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -799,7 +799,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ 
-809,7 +809,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -819,7 +819,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -829,7 +829,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -839,7 +839,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -849,7 +849,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { @@ -859,7 +859,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -869,7 +869,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64( undef, * 
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -879,7 +879,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -889,7 +889,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -899,7 +899,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -909,7 +909,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -919,7 +919,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -929,7 +929,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -939,7 +939,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -949,7 +949,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -959,7 +959,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -969,7 +969,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -979,7 +979,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -989,7 +989,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -999,7 +999,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, 
size_t vl) { @@ -1009,7 +1009,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1019,7 +1019,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1029,7 +1029,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1039,7 +1039,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1049,7 +1049,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1059,7 +1059,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { @@ -1069,7 +1069,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1079,7 +1079,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1089,7 +1089,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1099,7 +1099,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1109,7 +1109,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1119,7 +1119,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { @@ -1129,7 +1129,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1139,7 +1139,7 @@ // CHECK-RV64-LABEL: 
@test_vluxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1149,7 +1149,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1159,7 +1159,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1169,7 +1169,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1179,7 +1179,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1189,7 +1189,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1199,7 +1199,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv4i16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1209,7 +1209,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1219,7 +1219,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1229,7 +1229,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1239,7 +1239,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1249,7 +1249,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -1259,7 +1259,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { @@ -1269,7 +1269,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1279,7 +1279,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1289,7 +1289,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -1299,7 +1299,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -1309,7 +1309,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { @@ -1319,7 +1319,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1329,7 +1329,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64( undef, * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -1349,7 +1349,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -1359,7 +1359,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -1369,7 +1369,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -1379,7 +1379,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -1389,7 +1389,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -1399,7 +1399,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -1409,7 +1409,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1419,7 +1419,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1429,7 +1429,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1439,7 +1439,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -1449,7 +1449,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1459,7 +1459,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t 
test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1469,7 +1469,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -1479,7 +1479,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { @@ -1489,7 +1489,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1499,7 +1499,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -1509,7 +1509,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -1519,7 +1519,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { @@ -1529,7 +1529,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -1539,7 +1539,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -1549,7 +1549,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1559,7 +1559,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { @@ -1569,7 +1569,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -1579,7 +1579,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -1589,7 +1589,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei8_v_f32m2(const float *base, 
vuint8mf2_t bindex, size_t vl) { @@ -1599,7 +1599,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { @@ -1609,7 +1609,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { @@ -1619,7 +1619,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -1629,7 +1629,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -1639,7 +1639,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -1649,7 +1649,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { @@ -1659,7 +1659,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { @@ -1669,7 +1669,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -1679,7 +1679,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { @@ -1689,7 +1689,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -1699,7 +1699,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { @@ -1709,7 +1709,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { @@ -1719,7 +1719,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -1729,7 +1729,7 @@ // 
CHECK-RV64-LABEL: @test_vluxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { @@ -1739,7 +1739,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -1749,7 +1749,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { @@ -1759,7 +1759,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -1769,7 +1769,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -1779,7 +1779,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -1789,7 +1789,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv8f64.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { @@ -1799,7 +1799,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -1809,7 +1809,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -1819,7 +1819,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { @@ -1829,7 +1829,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { @@ -1839,7 +1839,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -1849,7 +1849,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -1859,7 +1859,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { @@ -1869,7 +1869,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { @@ -1879,7 +1879,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { @@ -1889,7 +1889,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -1899,7 +1899,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { @@ -1909,7 +1909,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { @@ -3829,7 +3829,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64( 
undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vluxei8_v_f16mf4 (const _Float16 *base, vuint8mf8_t bindex, size_t vl) { @@ -3839,7 +3839,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vluxei8_v_f16mf2 (const _Float16 *base, vuint8mf4_t bindex, size_t vl) { @@ -3849,7 +3849,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vluxei8_v_f16m1 (const _Float16 *base, vuint8mf2_t bindex, size_t vl) { @@ -3859,7 +3859,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vluxei8_v_f16m2 (const _Float16 *base, vuint8m1_t bindex, size_t vl) { @@ -3869,7 +3869,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vluxei8_v_f16m4 (const _Float16 *base, vuint8m2_t bindex, size_t vl) { @@ -3879,7 +3879,7 @@ // CHECK-RV64-LABEL: @test_vluxei8_v_f16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vluxei8_v_f16m8 (const _Float16 *base, vuint8m4_t bindex, size_t vl) { @@ -3889,7 +3889,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vluxei16_v_f16mf4 (const _Float16 *base, vuint16mf4_t bindex, size_t vl) { @@ -3899,7 +3899,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vluxei16_v_f16mf2 (const _Float16 *base, vuint16mf2_t bindex, size_t vl) { @@ -3909,7 +3909,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vluxei16_v_f16m1 (const _Float16 *base, vuint16m1_t bindex, size_t vl) { @@ -3919,7 +3919,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vluxei16_v_f16m2 (const _Float16 *base, vuint16m2_t bindex, size_t vl) { @@ -3929,7 +3929,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vluxei16_v_f16m4 (const _Float16 *base, vuint16m4_t bindex, size_t vl) { @@ -3939,7 +3939,7 @@ // CHECK-RV64-LABEL: @test_vluxei16_v_f16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m8_t test_vluxei16_v_f16m8 (const _Float16 *base, vuint16m8_t bindex, size_t vl) { @@ -3949,7 +3949,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vluxei32_v_f16mf4 (const _Float16 *base, vuint32mf2_t bindex, size_t vl) { @@ -3959,7 +3959,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64( undef, * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vluxei32_v_f16mf2 (const _Float16 *base, vuint32m1_t bindex, size_t vl) { @@ -3969,7 +3969,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vluxei32_v_f16m1 (const _Float16 *base, vuint32m2_t bindex, size_t vl) { @@ -3979,7 +3979,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vluxei32_v_f16m2 (const _Float16 *base, vuint32m4_t bindex, size_t vl) { @@ -3989,7 +3989,7 @@ // CHECK-RV64-LABEL: @test_vluxei32_v_f16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m4_t test_vluxei32_v_f16m4 (const _Float16 *base, vuint32m8_t bindex, size_t vl) { @@ -3999,7 +3999,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf4_t test_vluxei64_v_f16mf4 (const _Float16 *base, vuint64m1_t bindex, size_t vl) { @@ -4009,7 +4009,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16mf2_t test_vluxei64_v_f16mf2 (const _Float16 *base, vuint64m2_t bindex, size_t vl) { @@ -4019,7 +4019,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m1_t test_vluxei64_v_f16m1 (const _Float16 *base, vuint64m4_t bindex, size_t vl) { @@ -4029,7 +4029,7 @@ // CHECK-RV64-LABEL: @test_vluxei64_v_f16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64( undef, * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat16m2_t test_vluxei64_v_f16m2 (const _Float16 *base, vuint64m8_t bindex, size_t vl) { diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -175,26 +175,37 @@ ImmArg>, ImmArg>]>; - // For unit stride load + // For unit stride mask load // Input: (pointer, vl) - class RISCVUSLoad + class RISCVUSMLoad : Intrinsic<[llvm_anyvector_ty], [LLVMPointerType>, llvm_anyint_ty], [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { let VLOperand = 1; } + // For unit stride load + // Input: (passthru, pointer, vl) + class RISCVUSLoad + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, + LLVMPointerType>, + llvm_anyint_ty], + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } // For unit stride fault-only-first load - // Input: (pointer, vl) + // Input: (passthru, pointer, vl) // Output: (data, vl) // NOTE: We model this with default memory properties since we model writing // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work. class RISCVUSLoadFF : Intrinsic<[llvm_anyvector_ty, llvm_anyint_ty], - [LLVMPointerType>, LLVMMatchType<1>], - [NoCapture>]>, + [LLVMMatchType<0>, + LLVMPointerType>, LLVMMatchType<1>], + [NoCapture>]>, RISCVVIntrinsic { - let VLOperand = 1; + let VLOperand = 2; } // For unit stride load with mask // Input: (maskedoff, pointer, mask, vl, ta) @@ -222,14 +233,15 @@ [NoCapture>, ImmArg>]>, RISCVVIntrinsic { let VLOperand = 3; } - // For strided load - // Input: (pointer, stride, vl) + // For strided load with passthru operand + // Input: (passthru, pointer, stride, vl) class RISCVSLoad : Intrinsic<[llvm_anyvector_ty], - [LLVMPointerType>, + [LLVMMatchType<0>, + LLVMPointerType>, llvm_anyint_ty, LLVMMatchType<1>], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { - let VLOperand = 2; + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = 3; } // For strided load with mask // Input: (maskedoff, pointer, stride, mask, vl, ta) @@ -243,14 +255,15 @@ RISCVVIntrinsic { let VLOperand = 4; } - // For indexed load - // Input: (pointer, index, vl) + // For indexed load with passthru operand + // Input: (passthru, pointer, index, vl) class RISCVILoad : Intrinsic<[llvm_anyvector_ty], - [LLVMPointerType>, + [LLVMMatchType<0>, + LLVMPointerType>, llvm_anyvector_ty, llvm_anyint_ty], - [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { - let VLOperand = 2; + [NoCapture>, IntrReadMem]>, RISCVVIntrinsic { + let VLOperand = 3; } // For indexed load with mask // Input: (maskedoff, pointer, index, mask, vl, ta) @@ -1124,7 +1137,7 @@ defm vsoxei : RISCVIStore; defm vsuxei : RISCVIStore; - def int_riscv_vlm : RISCVUSLoad; + def int_riscv_vlm : RISCVUSMLoad; def int_riscv_vsm : RISCVUSStore; defm vadd : RISCVBinaryAAX; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -161,6 +161,7 @@ struct VLEPseudo { uint16_t Masked : 1; + uint16_t IsTU : 1; uint16_t Strided : 1; uint16_t FF : 1; uint16_t Log2SEW : 3; @@ -178,6 +179,7 @@ struct VLX_VSXPseudo { uint16_t 
Masked : 1; + uint16_t IsTU : 1; uint16_t Ordered : 1; uint16_t Log2SEW : 3; uint16_t LMUL : 3; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -86,8 +86,12 @@ SDVTList VTs = CurDAG->getVTList({VT, MVT::Other}); SDValue IntID = CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64); - SDValue Ops[] = {Chain, IntID, StackSlot, - CurDAG->getRegister(RISCV::X0, MVT::i64), VL}; + SDValue Ops[] = {Chain, + IntID, + CurDAG->getUNDEF(VT), + StackSlot, + CurDAG->getRegister(RISCV::X0, MVT::i64), + VL}; SDValue Result = CurDAG->getMemIntrinsicNode( ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64, MPI, Align(8), @@ -1210,9 +1214,14 @@ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); unsigned CurOp = 2; + // Masked intrinsics only have TU version pseudo instructions. + bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef()); SmallVector Operands; - if (IsMasked) + if (IsTU) Operands.push_back(Node->getOperand(CurOp++)); + else + // Skip the undef passthru operand for nomask TA version pseudo + CurOp++; MVT IndexVT; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, @@ -1230,7 +1239,7 @@ "values when XLEN=32"); } const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo( - IsMasked, IsOrdered, IndexLog2EEW, static_cast(LMUL), + IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast(LMUL), static_cast(IndexLMUL)); MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands); @@ -1255,16 +1264,25 @@ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); unsigned CurOp = 2; + // The riscv_vlm intrinsic is always tail agnostic and has no passthru operand. + bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm; + // Masked intrinsics only have TU version pseudo instructions. + bool IsTU = + HasPassthruOperand && + ((!IsMasked && !Node->getOperand(CurOp).isUndef()) || IsMasked); SmallVector Operands; - if (IsMasked) + if (IsTU) Operands.push_back(Node->getOperand(CurOp++)); + else if (HasPassthruOperand) + // Skip the undef passthru operand for nomask TA version pseudo + CurOp++; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided, Operands, /*IsLoad=*/true); RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); const RISCV::VLEPseudo *P = - RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW, + RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW, static_cast(LMUL)); MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands); @@ -1283,9 +1301,14 @@ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); unsigned CurOp = 2; + // Masked intrinsics only have TU version pseudo instructions.
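+  // For example, an unmasked load intrinsic whose passthru operand is undef keeps
+  // the existing tail-agnostic pseudo, while a non-undef passthru selects the new
+  // _TU pseudo, whose result register is tied to the passthru ($rd = $dest).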
+ bool IsTU = IsMasked || (!IsMasked && !Node->getOperand(CurOp).isUndef()); SmallVector Operands; - if (IsMasked) + if (IsTU) Operands.push_back(Node->getOperand(CurOp++)); + else + // Skip the undef passthru operand for nomask TA version pseudo + CurOp++; addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, /*IsStridedOrIndexed*/ false, Operands, @@ -1293,8 +1316,8 @@ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); const RISCV::VLEPseudo *P = - RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true, Log2SEW, - static_cast(LMUL)); + RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true, + Log2SEW, static_cast(LMUL)); MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), MVT::Other, MVT::Glue, Operands); @@ -1424,8 +1447,8 @@ "values when XLEN=32"); } const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo( - IsMasked, IsOrdered, IndexLog2EEW, static_cast(LMUL), - static_cast(IndexLMUL)); + IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW, + static_cast(LMUL), static_cast(IndexLMUL)); MachineSDNode *Store = CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands); @@ -1622,8 +1645,8 @@ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); const RISCV::VLEPseudo *P = RISCV::getVLEPseudo( - /*IsMasked*/ false, /*IsStrided*/ true, /*FF*/ false, Log2SEW, - static_cast(LMUL)); + /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false, + Log2SEW, static_cast(LMUL)); MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -2452,8 +2452,12 @@ SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT); - SDValue Ops[] = {Ld->getChain(), IntID, NewAddr, - DAG.getRegister(RISCV::X0, XLenVT), VL}; + SDValue Ops[] = {Ld->getChain(), + IntID, + DAG.getUNDEF(ContainerVT), + NewAddr, + DAG.getRegister(RISCV::X0, XLenVT), + VL}; SDValue NewLoad = DAG.getMemIntrinsicNode( ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT, DAG.getMachineFunction().getMachineMemOperand( @@ -4574,7 +4578,9 @@ auto *Load = cast(Op); SmallVector Ops{Load->getChain(), IntID}; - if (!IsUnmasked) + if (IsUnmasked) + Ops.push_back(DAG.getUNDEF(ContainerVT)); + else Ops.push_back(PassThru); Ops.push_back(Op.getOperand(3)); // Ptr Ops.push_back(Op.getOperand(4)); // Stride @@ -5432,7 +5438,9 @@ unsigned IntID = IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask; SmallVector Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)}; - if (!IsUnmasked) + if (IsUnmasked) + Ops.push_back(DAG.getUNDEF(ContainerVT)); + else Ops.push_back(PassThru); Ops.push_back(BasePtr); if (!IsUnmasked) @@ -5813,7 +5821,9 @@ unsigned IntID = IsUnmasked ? 
Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask; SmallVector Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)}; - if (!IsUnmasked) + if (IsUnmasked) + Ops.push_back(DAG.getUNDEF(ContainerVT)); + else Ops.push_back(PassThru); Ops.push_back(BasePtr); Ops.push_back(Index); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -424,8 +424,9 @@ let PrimaryKeyName = "getRISCVVIntrinsicInfo"; } -class RISCVVLE S, bits<3> L> { +class RISCVVLE S, bits<3> L> { bits<1> Masked = M; + bits<1> IsTU = TU; bits<1> Strided = Str; bits<1> FF = F; bits<3> Log2SEW = S; @@ -436,8 +437,8 @@ def RISCVVLETable : GenericTable { let FilterClass = "RISCVVLE"; let CppTypeName = "VLEPseudo"; - let Fields = ["Masked", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"]; - let PrimaryKey = ["Masked", "Strided", "FF", "Log2SEW", "LMUL"]; + let Fields = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL", "Pseudo"]; + let PrimaryKey = ["Masked", "IsTU", "Strided", "FF", "Log2SEW", "LMUL"]; let PrimaryKeyName = "getVLEPseudo"; } @@ -457,8 +458,9 @@ let PrimaryKeyName = "getVSEPseudo"; } -class RISCVVLX_VSX S, bits<3> L, bits<3> IL> { +class RISCVVLX_VSX S, bits<3> L, bits<3> IL> { bits<1> Masked = M; + bits<1> IsTU = TU; bits<1> Ordered = O; bits<3> Log2SEW = S; bits<3> LMUL = L; @@ -466,15 +468,15 @@ Pseudo Pseudo = !cast(NAME); } -class RISCVVLX S, bits<3> L, bits<3> IL> : - RISCVVLX_VSX; +class RISCVVLX S, bits<3> L, bits<3> IL> : + RISCVVLX_VSX; class RISCVVSX S, bits<3> L, bits<3> IL> : - RISCVVLX_VSX; + RISCVVLX_VSX; class RISCVVLX_VSXTable : GenericTable { let CppTypeName = "VLX_VSXPseudo"; - let Fields = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"]; - let PrimaryKey = ["Masked", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"]; + let Fields = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL", "Pseudo"]; + let PrimaryKey = ["Masked", "IsTU", "Ordered", "Log2SEW", "LMUL", "IndexLMUL"]; } def RISCVVLXTable : RISCVVLX_VSXTable { @@ -629,7 +631,7 @@ Pseudo<(outs RetClass:$rd), (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVVLE.val, VLMul> { + RISCVVLE.val, VLMul> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -639,13 +641,29 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoUSLoadNoMaskTU : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>, + RISCVVPseudo, + RISCVVLE.val, VLMul> { + let mayLoad = 1; + let mayStore = 0; + let hasSideEffects = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let Constraints = "$rd = $dest"; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoUSLoadMask : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, GPR:$rs1, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, - RISCVVLE.val, VLMul> { + RISCVVLE.val, VLMul> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -661,13 +679,29 @@ Pseudo<(outs RetClass:$rd), (ins GPR:$rs1, GPR:$rs2, AVL:$vl, ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVVLE.val, VLMul> { + RISCVVLE.val, VLMul> { + let mayLoad = 1; + let mayStore = 0; + let hasSideEffects = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoSLoadNoMaskTU: + Pseudo<(outs RetClass:$rd), + (ins RetClass:$dest, GPR:$rs1, GPR:$rs2, AVL:$vl, 
ixlenimm:$sew),[]>, + RISCVVPseudo, + RISCVVLE.val, VLMul> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; let HasVLOp = 1; let HasSEWOp = 1; let HasDummyMask = 1; + let HasMergeOp = 1; + let Constraints = "$rd = $dest"; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -677,7 +711,7 @@ GPR:$rs1, GPR:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, - RISCVVLE.val, VLMul> { + RISCVVLE.val, VLMul> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -692,9 +726,10 @@ class VPseudoILoadNoMask LMUL, bit Ordered, bit EarlyClobber>: Pseudo<(outs RetClass:$rd), - (ins GPR:$rs1, IdxClass:$rs2, AVL:$vl, ixlenimm:$sew),[]>, + (ins GPR:$rs1, IdxClass:$rs2, AVL:$vl, + ixlenimm:$sew),[]>, RISCVVPseudo, - RISCVVLX.val, VLMul, LMUL> { + RISCVVLX.val, VLMul, LMUL> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -705,6 +740,24 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } +class VPseudoILoadNoMaskTU LMUL, + bit Ordered, bit EarlyClobber>: + Pseudo<(outs RetClass:$rd), + (ins RetClass:$dest, GPR:$rs1, IdxClass:$rs2, AVL:$vl, + ixlenimm:$sew),[]>, + RISCVVPseudo, + RISCVVLX.val, VLMul, LMUL> { + let mayLoad = 1; + let mayStore = 0; + let hasSideEffects = 0; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasMergeOp = 1; + let Constraints = !if(!eq(EarlyClobber, 1), "@earlyclobber $rd, $rd = $dest", "$rd = $dest"); + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoILoadMask LMUL, bit Ordered, bit EarlyClobber>: Pseudo<(outs GetVRegNoV0.R:$rd), @@ -712,7 +765,7 @@ GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>, RISCVVPseudo, - RISCVVLX.val, VLMul, LMUL> { + RISCVVLX.val, VLMul, LMUL> { let mayLoad = 1; let mayStore = 0; let hasSideEffects = 0; @@ -1347,6 +1400,9 @@ def "E" # eew # "_V_" # LInfo : VPseudoUSLoadNoMask, VLESched; + def "E" # eew # "_V_" # LInfo # "_TU": + VPseudoUSLoadNoMaskTU, + VLESched; def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSLoadMask, VLESched; @@ -1364,6 +1420,9 @@ def "E" # eew # "FF_V_" # LInfo : VPseudoUSLoadNoMask, VLFSched; + def "E" # eew # "FF_V_" # LInfo # "_TU": + VPseudoUSLoadNoMaskTU, + VLFSched; def "E" # eew # "FF_V_" # LInfo # "_MASK" : VPseudoUSLoadMask, VLFSched; @@ -1388,6 +1447,8 @@ let VLMul = lmul.value in { def "E" # eew # "_V_" # LInfo : VPseudoSLoadNoMask, VLSSched; + def "E" # eew # "_V_" # LInfo # "_TU": VPseudoSLoadNoMaskTU, + VLSSched; def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSLoadMask, VLSSched; } @@ -1414,6 +1475,9 @@ def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo : VPseudoILoadNoMask, VLXSched; + def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_TU": + VPseudoILoadNoMaskTU, + VLXSched; def "EI" # eew # "_V_" # IdxLInfo # "_" # LInfo # "_MASK" : VPseudoILoadMask, VLXSched; diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll @@ -95,7 +95,7 @@ ret i32 %vl } -declare @llvm.riscv.vle.nxv4i32.i32(*, i32) +declare @llvm.riscv.vle.nxv4i32.i32(, *, i32) ; Check that we remove the redundant vsetvli when followed by another operation define @redundant_vsetvli(i32 %avl, * %ptr) nounwind { @@ -105,7 +105,7 @@ ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: ret %vl = call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 2, i32 1) - %x = call @llvm.riscv.vle.nxv4i32.i32(* %ptr, i32 %vl) + %x = call @llvm.riscv.vle.nxv4i32.i32( 
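
The *_TU pseudo classes above add a $dest input tied to the result ("$rd = $dest", HasMergeOp = 1), so tail elements of the merge value survive the load and a later pass can emit a "tu" vsetvli. A sketch of the strided case, mirroring intrinsic_vlse_v_tu from the new unmasked-tu.ll test; the function name and the explicit .nxv1i8 suffix are illustrative, and the expected-assembly comment restates the test's RV64 checks:

; Sketch only: unmasked strided load with a live passthru.
declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>*, i64, i64)

define <vscale x 1 x i8> @vlse_tu(<vscale x 1 x i8> %merge, <vscale x 1 x i8>* %p, i64 %stride, i64 %vl) {
entry:
  ; expected codegen (cf. unmasked-tu.ll):
  ;   vsetvli zero, a2, e8, mf8, tu, mu
  ;   vlse8.v v8, (a0), a1
  %v = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(<vscale x 1 x i8> %merge, <vscale x 1 x i8>* %p, i64 %stride, i64 %vl)
  ret <vscale x 1 x i8> %v
}
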
undef, * %ptr, i32 %vl) ret %x } @@ -122,6 +122,6 @@ ; CHECK-NEXT: ret %vl0 = call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 2, i32 1) %vl1 = call i32 @llvm.riscv.vsetvli.i32(i32 %vl0, i32 2, i32 1) - %x = call @llvm.riscv.vle.nxv4i32.i32(* %ptr, i32 %vl1) + %x = call @llvm.riscv.vle.nxv4i32.i32( undef, * %ptr, i32 %vl1) ret %x } diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll @@ -113,7 +113,7 @@ ret i64 %vl } -declare @llvm.riscv.vle.nxv4i32.i64(*, i64) +declare @llvm.riscv.vle.nxv4i32.i64(, *, i64) ; Check that we remove the redundant vsetvli when followed by another operation define @redundant_vsetvli(i64 %avl, * %ptr) nounwind { @@ -123,7 +123,7 @@ ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: ret %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1) - %x = call @llvm.riscv.vle.nxv4i32.i64(* %ptr, i64 %vl) + %x = call @llvm.riscv.vle.nxv4i32.i64( undef, * %ptr, i64 %vl) ret %x } @@ -140,6 +140,6 @@ ; CHECK-NEXT: ret %vl0 = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1) %vl1 = call i64 @llvm.riscv.vsetvli.i64(i64 %vl0, i64 2, i64 1) - %x = call @llvm.riscv.vle.nxv4i32.i64(* %ptr, i64 %vl1) + %x = call @llvm.riscv.vle.nxv4i32.i64( undef, * %ptr, i64 %vl1) ret %x } diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll @@ -168,7 +168,7 @@ %arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %input, i64 0, i64 0 %2 = load i64, i64* %vl, align 8 %3 = bitcast i32* %arraydecay to * - %4 = call @llvm.riscv.vle.nxv16i32.i64(* %3, i64 %2) + %4 = call @llvm.riscv.vle.nxv16i32.i64( undef, * %3, i64 %2) store %4, * %v0, align 4 store i32 1, i32* %x0, align 4 store i32 1, i32* %x1, align 4 @@ -211,6 +211,6 @@ declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) -declare @llvm.riscv.vle.nxv16i32.i64(* nocapture, i64) +declare @llvm.riscv.vle.nxv16i32.i64(, * nocapture, i64) attributes #0 = { noinline nounwind optnone "frame-pointer"="all" } diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll @@ -0,0 +1,120 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefix=RV64 + +declare @llvm.riscv.vle.nxv1i8( + , + *, + iXLen); + +define @intrinsic_vle_v_tu_nxv1i8_nxv1i8( %0, * %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV32-NEXT: vle8.v v8, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vle_v_tu_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vle8.v v8, (a0) +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1i8( + %0, + * %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse( + , + *, + iXLen, + iXLen); + + +define @intrinsic_vlse_v_tu( %0, * %1, iXLen %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vlse_v_tu: +; RV32: # 
%bb.0: # %entry +; RV32-NEXT: vsetvli zero, a2, e8, mf8, tu, mu +; RV32-NEXT: vlse8.v v8, (a0), a1 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vlse_v_tu: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a2, e8, mf8, tu, mu +; RV64-NEXT: vlse8.v v8, (a0), a1 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vlse( + %0, + * %1, + iXLen %2, + iXLen %3) + + ret %a +} + +declare { , iXLen } @llvm.riscv.vleff( + , + *, + iXLen); + +define @intrinsic_vleff_v_tu( %0, * %1, iXLen %2, iXLen* %3) nounwind { +; RV32-LABEL: intrinsic_vleff_v_tu: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV32-NEXT: vle8ff.v v8, (a0) +; RV32-NEXT: csrr a0, vl +; RV32-NEXT: sw a0, 0(a2) +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vleff_v_tu: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vle8ff.v v8, (a0) +; RV64-NEXT: csrr a0, vl +; RV64-NEXT: sd a0, 0(a2) +; RV64-NEXT: ret +entry: + %a = call { , iXLen } @llvm.riscv.vleff( + %0, + * %1, + iXLen %2) + %b = extractvalue { , iXLen } %a, 0 + %c = extractvalue { , iXLen } %a, 1 + store iXLen %c, iXLen* %3 + ret %b +} + +declare @llvm.riscv.vloxei.nxv1i8( + , + *, + , + iXLen); + +define @intrinsic_vloxei_v_tu_nxv1i8_nxv1i8( %0, * %1, %2, iXLen %3) nounwind { +; RV32-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8: +; RV32: # %bb.0: # %entry +; RV32-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV32-NEXT: vloxei8.v v8, (a0), v9 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vloxei_v_tu_nxv1i8_nxv1i8: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu +; RV64-NEXT: vloxei8.v v8, (a0), v9 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vloxei.nxv1i8( + %0, + * %1, + %2, + iXLen %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll @@ -3,6 +3,7 @@ ; RUN: -mattr=+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vle.nxv1i64( + , *, i32); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1i64( + undef, * %0, i32 %1) @@ -44,6 +46,7 @@ } declare @llvm.riscv.vle.nxv2i64( + , *, i32); @@ -55,6 +58,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2i64( + undef, * %0, i32 %1) @@ -85,6 +89,7 @@ } declare @llvm.riscv.vle.nxv4i64( + , *, i32); @@ -96,6 +101,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4i64( + undef, * %0, i32 %1) @@ -126,6 +132,7 @@ } declare @llvm.riscv.vle.nxv8i64( + , *, i32); @@ -137,6 +144,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8i64( + undef, * %0, i32 %1) @@ -167,6 +175,7 @@ } declare @llvm.riscv.vle.nxv1f64( + , *, i32); @@ -178,6 +187,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1f64( + undef, * %0, i32 %1) @@ -208,6 +218,7 @@ } declare @llvm.riscv.vle.nxv2f64( + , *, i32); @@ -219,6 +230,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2f64( + undef, * %0, i32 %1) @@ -249,6 +261,7 @@ } declare @llvm.riscv.vle.nxv4f64( + , *, i32); @@ -260,6 +273,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4f64( + undef, * %0, i32 %1) @@ -290,6 +304,7 @@ } declare @llvm.riscv.vle.nxv8f64( + , *, i32); @@ -301,6 +316,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8f64( + undef, * %0, i32 %1) @@ -331,6 +347,7 @@ } declare @llvm.riscv.vle.nxv1i32( + , *, i32); @@ -342,6 +359,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1i32( + undef, * %0, i32 %1) @@ -372,6 +390,7 @@ } declare 
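
The remaining test updates below are mechanical: each unmasked load declaration gains a leading passthru parameter of the result type, and each call site passes undef for it. For one representative case, the nxv1i64 form in vle-rv32.ll (the function name below is illustrative), the shape of the change is:

; Before this patch:
;   declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(<vscale x 1 x i64>*, i32)
;   %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(<vscale x 1 x i64>* %0, i32 %1)
; After this patch:
declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, i32)

define <vscale x 1 x i64> @test_vle_nxv1i64(<vscale x 1 x i64>* %0, i32 %1) {
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i32 %1)
  ret <vscale x 1 x i64> %a
}
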
@llvm.riscv.vle.nxv2i32( + , *, i32); @@ -383,6 +402,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2i32( + undef, * %0, i32 %1) @@ -413,6 +433,7 @@ } declare @llvm.riscv.vle.nxv4i32( + , *, i32); @@ -424,6 +445,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4i32( + undef, * %0, i32 %1) @@ -454,6 +476,7 @@ } declare @llvm.riscv.vle.nxv8i32( + , *, i32); @@ -465,6 +488,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8i32( + undef, * %0, i32 %1) @@ -495,6 +519,7 @@ } declare @llvm.riscv.vle.nxv16i32( + , *, i32); @@ -506,6 +531,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16i32( + undef, * %0, i32 %1) @@ -536,6 +562,7 @@ } declare @llvm.riscv.vle.nxv1f32( + , *, i32); @@ -547,6 +574,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1f32( + undef, * %0, i32 %1) @@ -577,6 +605,7 @@ } declare @llvm.riscv.vle.nxv2f32( + , *, i32); @@ -588,6 +617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2f32( + undef, * %0, i32 %1) @@ -618,6 +648,7 @@ } declare @llvm.riscv.vle.nxv4f32( + , *, i32); @@ -629,6 +660,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4f32( + undef, * %0, i32 %1) @@ -659,6 +691,7 @@ } declare @llvm.riscv.vle.nxv8f32( + , *, i32); @@ -670,6 +703,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8f32( + undef, * %0, i32 %1) @@ -700,6 +734,7 @@ } declare @llvm.riscv.vle.nxv16f32( + , *, i32); @@ -711,6 +746,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16f32( + undef, * %0, i32 %1) @@ -741,6 +777,7 @@ } declare @llvm.riscv.vle.nxv1i16( + , *, i32); @@ -752,6 +789,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1i16( + undef, * %0, i32 %1) @@ -782,6 +820,7 @@ } declare @llvm.riscv.vle.nxv2i16( + , *, i32); @@ -793,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2i16( + undef, * %0, i32 %1) @@ -823,6 +863,7 @@ } declare @llvm.riscv.vle.nxv4i16( + , *, i32); @@ -834,6 +875,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4i16( + undef, * %0, i32 %1) @@ -864,6 +906,7 @@ } declare @llvm.riscv.vle.nxv8i16( + , *, i32); @@ -875,6 +918,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8i16( + undef, * %0, i32 %1) @@ -905,6 +949,7 @@ } declare @llvm.riscv.vle.nxv16i16( + , *, i32); @@ -916,6 +961,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16i16( + undef, * %0, i32 %1) @@ -946,6 +992,7 @@ } declare @llvm.riscv.vle.nxv32i16( + , *, i32); @@ -957,6 +1004,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv32i16( + undef, * %0, i32 %1) @@ -987,6 +1035,7 @@ } declare @llvm.riscv.vle.nxv1f16( + , *, i32); @@ -998,6 +1047,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1f16( + undef, * %0, i32 %1) @@ -1028,6 +1078,7 @@ } declare @llvm.riscv.vle.nxv2f16( + , *, i32); @@ -1039,6 +1090,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2f16( + undef, * %0, i32 %1) @@ -1069,6 +1121,7 @@ } declare @llvm.riscv.vle.nxv4f16( + , *, i32); @@ -1080,6 +1133,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4f16( + undef, * %0, i32 %1) @@ -1110,6 +1164,7 @@ } declare @llvm.riscv.vle.nxv8f16( + , *, i32); @@ -1121,6 +1176,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8f16( + undef, * %0, i32 %1) @@ -1151,6 +1207,7 @@ } declare @llvm.riscv.vle.nxv16f16( + , *, i32); @@ -1162,6 +1219,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16f16( + undef, * %0, i32 %1) @@ -1192,6 +1250,7 @@ } declare @llvm.riscv.vle.nxv32f16( + , *, i32); @@ -1203,6 +1262,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vle.nxv32f16( + undef, * %0, i32 %1) @@ -1233,6 +1293,7 @@ } declare @llvm.riscv.vle.nxv1i8( + , *, i32); @@ -1244,6 +1305,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1i8( + undef, * %0, i32 %1) @@ -1274,6 +1336,7 @@ } declare @llvm.riscv.vle.nxv2i8( + , *, i32); @@ -1285,6 +1348,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2i8( + undef, * %0, i32 %1) @@ -1315,6 +1379,7 @@ } declare @llvm.riscv.vle.nxv4i8( + , *, i32); @@ -1326,6 +1391,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4i8( + undef, * %0, i32 %1) @@ -1356,6 +1422,7 @@ } declare @llvm.riscv.vle.nxv8i8( + , *, i32); @@ -1367,6 +1434,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8i8( + undef, * %0, i32 %1) @@ -1397,6 +1465,7 @@ } declare @llvm.riscv.vle.nxv16i8( + , *, i32); @@ -1408,6 +1477,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16i8( + undef, * %0, i32 %1) @@ -1438,6 +1508,7 @@ } declare @llvm.riscv.vle.nxv32i8( + , *, i32); @@ -1449,6 +1520,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv32i8( + undef, * %0, i32 %1) @@ -1479,6 +1551,7 @@ } declare @llvm.riscv.vle.nxv64i8( + , *, i32); @@ -1490,6 +1563,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv64i8( + undef, * %0, i32 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll @@ -3,6 +3,7 @@ ; RUN: -mattr=+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vle.nxv1i64( + , *, i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1i64( + undef, * %0, i64 %1) @@ -44,6 +46,7 @@ } declare @llvm.riscv.vle.nxv2i64( + , *, i64); @@ -55,6 +58,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2i64( + undef, * %0, i64 %1) @@ -85,6 +89,7 @@ } declare @llvm.riscv.vle.nxv4i64( + , *, i64); @@ -96,6 +101,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4i64( + undef, * %0, i64 %1) @@ -126,6 +132,7 @@ } declare @llvm.riscv.vle.nxv8i64( + , *, i64); @@ -137,6 +144,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8i64( + undef, * %0, i64 %1) @@ -167,6 +175,7 @@ } declare @llvm.riscv.vle.nxv1f64( + , *, i64); @@ -178,6 +187,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1f64( + undef, * %0, i64 %1) @@ -208,6 +218,7 @@ } declare @llvm.riscv.vle.nxv2f64( + , *, i64); @@ -219,6 +230,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2f64( + undef, * %0, i64 %1) @@ -249,6 +261,7 @@ } declare @llvm.riscv.vle.nxv4f64( + , *, i64); @@ -260,6 +273,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4f64( + undef, * %0, i64 %1) @@ -290,6 +304,7 @@ } declare @llvm.riscv.vle.nxv8f64( + , *, i64); @@ -301,6 +316,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8f64( + undef, * %0, i64 %1) @@ -331,6 +347,7 @@ } declare @llvm.riscv.vle.nxv1i32( + , *, i64); @@ -342,6 +359,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1i32( + undef, * %0, i64 %1) @@ -372,6 +390,7 @@ } declare @llvm.riscv.vle.nxv2i32( + , *, i64); @@ -383,6 +402,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2i32( + undef, * %0, i64 %1) @@ -413,6 +433,7 @@ } declare @llvm.riscv.vle.nxv4i32( + , *, i64); @@ -424,6 +445,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4i32( + undef, * %0, i64 %1) @@ -454,6 +476,7 @@ } declare @llvm.riscv.vle.nxv8i32( + , *, i64); @@ -465,6 +488,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8i32( + undef, * %0, i64 
%1) @@ -495,6 +519,7 @@ } declare @llvm.riscv.vle.nxv16i32( + , *, i64); @@ -506,6 +531,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16i32( + undef, * %0, i64 %1) @@ -536,6 +562,7 @@ } declare @llvm.riscv.vle.nxv1f32( + , *, i64); @@ -547,6 +574,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1f32( + undef, * %0, i64 %1) @@ -577,6 +605,7 @@ } declare @llvm.riscv.vle.nxv2f32( + , *, i64); @@ -588,6 +617,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2f32( + undef, * %0, i64 %1) @@ -618,6 +648,7 @@ } declare @llvm.riscv.vle.nxv4f32( + , *, i64); @@ -629,6 +660,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4f32( + undef, * %0, i64 %1) @@ -659,6 +691,7 @@ } declare @llvm.riscv.vle.nxv8f32( + , *, i64); @@ -670,6 +703,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8f32( + undef, * %0, i64 %1) @@ -700,6 +734,7 @@ } declare @llvm.riscv.vle.nxv16f32( + , *, i64); @@ -711,6 +746,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16f32( + undef, * %0, i64 %1) @@ -741,6 +777,7 @@ } declare @llvm.riscv.vle.nxv1i16( + , *, i64); @@ -752,6 +789,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1i16( + undef, * %0, i64 %1) @@ -782,6 +820,7 @@ } declare @llvm.riscv.vle.nxv2i16( + , *, i64); @@ -793,6 +832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2i16( + undef, * %0, i64 %1) @@ -823,6 +863,7 @@ } declare @llvm.riscv.vle.nxv4i16( + , *, i64); @@ -834,6 +875,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4i16( + undef, * %0, i64 %1) @@ -864,6 +906,7 @@ } declare @llvm.riscv.vle.nxv8i16( + , *, i64); @@ -875,6 +918,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8i16( + undef, * %0, i64 %1) @@ -905,6 +949,7 @@ } declare @llvm.riscv.vle.nxv16i16( + , *, i64); @@ -916,6 +961,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16i16( + undef, * %0, i64 %1) @@ -946,6 +992,7 @@ } declare @llvm.riscv.vle.nxv32i16( + , *, i64); @@ -957,6 +1004,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv32i16( + undef, * %0, i64 %1) @@ -987,6 +1035,7 @@ } declare @llvm.riscv.vle.nxv1f16( + , *, i64); @@ -998,6 +1047,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1f16( + undef, * %0, i64 %1) @@ -1028,6 +1078,7 @@ } declare @llvm.riscv.vle.nxv2f16( + , *, i64); @@ -1039,6 +1090,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2f16( + undef, * %0, i64 %1) @@ -1069,6 +1121,7 @@ } declare @llvm.riscv.vle.nxv4f16( + , *, i64); @@ -1080,6 +1133,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4f16( + undef, * %0, i64 %1) @@ -1110,6 +1164,7 @@ } declare @llvm.riscv.vle.nxv8f16( + , *, i64); @@ -1121,6 +1176,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8f16( + undef, * %0, i64 %1) @@ -1151,6 +1207,7 @@ } declare @llvm.riscv.vle.nxv16f16( + , *, i64); @@ -1162,6 +1219,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16f16( + undef, * %0, i64 %1) @@ -1192,6 +1250,7 @@ } declare @llvm.riscv.vle.nxv32f16( + , *, i64); @@ -1203,6 +1262,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv32f16( + undef, * %0, i64 %1) @@ -1233,6 +1293,7 @@ } declare @llvm.riscv.vle.nxv1i8( + , *, i64); @@ -1244,6 +1305,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv1i8( + undef, * %0, i64 %1) @@ -1274,6 +1336,7 @@ } declare @llvm.riscv.vle.nxv2i8( + , *, i64); @@ -1285,6 +1348,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv2i8( + undef, * %0, i64 %1) @@ -1315,6 +1379,7 @@ } declare @llvm.riscv.vle.nxv4i8( + , *, i64); @@ -1326,6 +1391,7 @@ 
; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv4i8( + undef, * %0, i64 %1) @@ -1356,6 +1422,7 @@ } declare @llvm.riscv.vle.nxv8i8( + , *, i64); @@ -1367,6 +1434,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv8i8( + undef, * %0, i64 %1) @@ -1397,6 +1465,7 @@ } declare @llvm.riscv.vle.nxv16i8( + , *, i64); @@ -1408,6 +1477,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv16i8( + undef, * %0, i64 %1) @@ -1438,6 +1508,7 @@ } declare @llvm.riscv.vle.nxv32i8( + , *, i64); @@ -1449,6 +1520,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv32i8( + undef, * %0, i64 %1) @@ -1479,6 +1551,7 @@ } declare @llvm.riscv.vle.nxv64i8( + , *, i64); @@ -1490,6 +1563,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vle.nxv64i8( + undef, * %0, i64 %1) diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+f,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare { , i32 } @llvm.riscv.vleff.nxv1i64( + , *, i32); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1i64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -52,6 +54,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv2i64( + , *, i32); @@ -65,6 +68,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv2i64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -102,6 +106,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv4i64( + , *, i32); @@ -115,6 +120,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv4i64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -152,6 +158,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv8i64( + , *, i32); @@ -165,6 +172,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv8i64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -202,6 +210,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv1f64( + , *, i32); @@ -215,6 +224,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1f64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -252,6 +262,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv2f64( + , *, i32); @@ -265,6 +276,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv2f64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -302,6 +314,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv4f64( + , *, i32); @@ -315,6 +328,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv4f64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -352,6 +366,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv8f64( + , *, i32); @@ -365,6 +380,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv8f64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -402,6 +418,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv1i32( + , *, i32); @@ -415,6 +432,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1i32( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -452,6 +470,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv2i32( + , *, i32); @@ -465,6 +484,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv2i32( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -502,6 +522,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv4i32( + , *, i32); @@ -515,6 +536,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv4i32( + undef, * %0, i32 %1) %b 
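
vleff is the one case with an aggregate result: the intrinsic returns { vector, xlen } (the loaded data plus the post-fault vl), and with this patch its unmasked form also takes the passthru as the first argument. A sketch of the updated call pattern, modeled on intrinsic_vleff_v_tu in unmasked-tu.ll; the function and parameter names are illustrative:

; Sketch only: fault-only-first load with passthru; the second struct
; member is the new vl and is stored out through %vl_out.
declare { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>*, i64)

define <vscale x 1 x i8> @vleff_tu(<vscale x 1 x i8> %merge, <vscale x 1 x i8>* %p, i64 %avl, i64* %vl_out) {
entry:
  %a = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8(<vscale x 1 x i8> %merge, <vscale x 1 x i8>* %p, i64 %avl)
  %data = extractvalue { <vscale x 1 x i8>, i64 } %a, 0
  %new_vl = extractvalue { <vscale x 1 x i8>, i64 } %a, 1
  store i64 %new_vl, i64* %vl_out
  ret <vscale x 1 x i8> %data
}
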
= extractvalue { , i32 } %a, 0 @@ -552,6 +574,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv8i32( + , *, i32); @@ -565,6 +588,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv8i32( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -602,6 +626,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv16i32( + , *, i32); @@ -615,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv16i32( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -652,6 +678,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv1f32( + , *, i32); @@ -665,6 +692,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1f32( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -702,6 +730,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv2f32( + , *, i32); @@ -715,6 +744,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv2f32( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -752,6 +782,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv4f32( + , *, i32); @@ -765,6 +796,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv4f32( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -802,6 +834,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv8f32( + , *, i32); @@ -815,6 +848,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv8f32( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -852,6 +886,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv16f32( + , *, i32); @@ -865,6 +900,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv16f32( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -902,6 +938,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv1i16( + , *, i32); @@ -915,6 +952,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1i16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -952,6 +990,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv2i16( + , *, i32); @@ -965,6 +1004,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv2i16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1002,6 +1042,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv4i16( + , *, i32); @@ -1015,6 +1056,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv4i16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1052,6 +1094,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv8i16( + , *, i32); @@ -1065,6 +1108,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv8i16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1102,6 +1146,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv16i16( + , *, i32); @@ -1115,6 +1160,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv16i16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1152,6 +1198,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv32i16( + , *, i32); @@ -1165,6 +1212,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv32i16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1202,6 +1250,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv1f16( + , *, i32); @@ -1215,6 +1264,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1f16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1252,6 +1302,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv2f16( + , *, i32); @@ -1265,6 +1316,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv2f16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1302,6 +1354,7 @@ } declare { , i32 } 
@llvm.riscv.vleff.nxv4f16( + , *, i32); @@ -1315,6 +1368,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv4f16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1352,6 +1406,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv8f16( + , *, i32); @@ -1365,6 +1420,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv8f16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1402,6 +1458,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv16f16( + , *, i32); @@ -1415,6 +1472,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv16f16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1452,6 +1510,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv32f16( + , *, i32); @@ -1465,6 +1524,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv32f16( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1502,6 +1562,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv1i8( + , *, i32); @@ -1515,6 +1576,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1i8( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1552,6 +1614,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv2i8( + , *, i32); @@ -1565,6 +1628,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv2i8( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1602,6 +1666,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv4i8( + , *, i32); @@ -1615,6 +1680,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv4i8( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1652,6 +1718,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv8i8( + , *, i32); @@ -1665,6 +1732,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv8i8( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1702,6 +1770,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv16i8( + , *, i32); @@ -1715,6 +1784,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv16i8( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1752,6 +1822,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv32i8( + , *, i32); @@ -1765,6 +1836,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv32i8( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1802,6 +1874,7 @@ } declare { , i32 } @llvm.riscv.vleff.nxv64i8( + , *, i32); @@ -1815,6 +1888,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv64i8( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1860,6 +1934,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1f64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 0 @@ -1894,6 +1969,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1f64( + undef, * %0, i32 %1) %b = extractvalue { , i32 } %a, 1 @@ -1930,6 +2006,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i32 } @llvm.riscv.vleff.nxv1f64( + undef, * %0, i32 %1) ret void diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+f,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare { , i64 } @llvm.riscv.vleff.nxv1i64( + , *, i64); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1i64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -52,6 +54,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv2i64( + , *, i64); @@ -65,6 
+68,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv2i64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -102,6 +106,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv4i64( + , *, i64); @@ -115,6 +120,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv4i64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -152,6 +158,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv8i64( + , *, i64); @@ -165,6 +172,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv8i64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -202,6 +210,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv1f64( + , *, i64); @@ -215,6 +224,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1f64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -252,6 +262,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv2f64( + , *, i64); @@ -265,6 +276,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv2f64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -302,6 +314,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv4f64( + , *, i64); @@ -315,6 +328,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv4f64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -352,6 +366,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv8f64( + , *, i64); @@ -365,6 +380,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv8f64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -402,6 +418,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv1i32( + , *, i64); @@ -415,6 +432,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1i32( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -452,6 +470,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv2i32( + , *, i64); @@ -465,6 +484,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv2i32( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -502,6 +522,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv4i32( + , *, i64); @@ -515,6 +536,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv4i32( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -552,6 +574,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv8i32( + , *, i64); @@ -565,6 +588,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv8i32( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -602,6 +626,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv16i32( + , *, i64); @@ -615,6 +640,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv16i32( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -652,6 +678,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv1f32( + , *, i64); @@ -665,6 +692,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1f32( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -702,6 +730,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv2f32( + , *, i64); @@ -715,6 +744,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv2f32( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -752,6 +782,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv4f32( + , *, i64); @@ -765,6 +796,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv4f32( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -802,6 +834,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv8f32( + , *, i64); @@ -815,6 +848,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv8f32( + undef, * %0, i64 %1) %b = 
extractvalue { , i64 } %a, 0 @@ -852,6 +886,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv16f32( + , *, i64); @@ -865,6 +900,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv16f32( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -902,6 +938,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv1i16( + , *, i64); @@ -915,6 +952,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1i16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -952,6 +990,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv2i16( + , *, i64); @@ -965,6 +1004,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv2i16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1002,6 +1042,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv4i16( + , *, i64); @@ -1015,6 +1056,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv4i16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1052,6 +1094,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv8i16( + , *, i64); @@ -1065,6 +1108,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv8i16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1102,6 +1146,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv16i16( + , *, i64); @@ -1115,6 +1160,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv16i16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1152,6 +1198,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv32i16( + , *, i64); @@ -1165,6 +1212,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv32i16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1202,6 +1250,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv1f16( + , *, i64); @@ -1215,6 +1264,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1f16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1252,6 +1302,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv2f16( + , *, i64); @@ -1265,6 +1316,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv2f16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1302,6 +1354,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv4f16( + , *, i64); @@ -1315,6 +1368,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv4f16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1352,6 +1406,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv8f16( + , *, i64); @@ -1365,6 +1420,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv8f16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1402,6 +1458,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv16f16( + , *, i64); @@ -1415,6 +1472,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv16f16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1452,6 +1510,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv32f16( + , *, i64); @@ -1465,6 +1524,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv32f16( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1502,6 +1562,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv1i8( + , *, i64); @@ -1515,6 +1576,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1i8( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1552,6 +1614,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv2i8( + , *, i64); @@ -1565,6 +1628,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv2i8( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1602,6 +1666,7 @@ } declare { , 
i64 } @llvm.riscv.vleff.nxv4i8( + , *, i64); @@ -1615,6 +1680,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv4i8( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1652,6 +1718,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv8i8( + , *, i64); @@ -1665,6 +1732,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv8i8( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1702,6 +1770,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv16i8( + , *, i64); @@ -1715,6 +1784,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv16i8( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1752,6 +1822,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv32i8( + , *, i64); @@ -1765,6 +1836,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv32i8( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1802,6 +1874,7 @@ } declare { , i64 } @llvm.riscv.vleff.nxv64i8( + , *, i64); @@ -1815,6 +1888,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv64i8( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1860,6 +1934,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1f64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 0 @@ -1894,6 +1969,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1f64( + undef, * %0, i64 %1) %b = extractvalue { , i64 } %a, 1 @@ -1930,6 +2006,7 @@ ; CHECK-NEXT: ret entry: %a = call { , i64 } @llvm.riscv.vleff.nxv1f64( + undef, * %0, i64 %1) ret void diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( + , *, , i32); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i32( + undef, * %0, %1, i32 %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vloxei.nxv2i8.nxv2i32( + , *, , i32); @@ -62,6 +65,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i32( + undef, * %0, %1, i32 %2) @@ -95,6 +99,7 @@ } declare @llvm.riscv.vloxei.nxv4i8.nxv4i32( + , *, , i32); @@ -108,6 +113,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i32( + undef, * %0, %1, i32 %2) @@ -141,6 +147,7 @@ } declare @llvm.riscv.vloxei.nxv8i8.nxv8i32( + , *, , i32); @@ -154,6 +161,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i32( + undef, * %0, %1, i32 %2) @@ -187,6 +195,7 @@ } declare @llvm.riscv.vloxei.nxv16i8.nxv16i32( + , *, , i32); @@ -200,6 +209,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i32( + undef, * %0, %1, i32 %2) @@ -233,6 +243,7 @@ } declare @llvm.riscv.vloxei.nxv1i16.nxv1i32( + , *, , i32); @@ -246,6 +257,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i32( + undef, * %0, %1, i32 %2) @@ -279,6 +291,7 @@ } declare @llvm.riscv.vloxei.nxv2i16.nxv2i32( + , *, , i32); @@ -292,6 +305,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i32( + undef, * %0, %1, i32 %2) @@ -325,6 +339,7 @@ } declare @llvm.riscv.vloxei.nxv4i16.nxv4i32( + , *, , i32); @@ -338,6 +353,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i32( + undef, * %0, %1, i32 %2) @@ -371,6 +387,7 @@ } declare @llvm.riscv.vloxei.nxv8i16.nxv8i32( + , *, , i32); @@ -384,6 +401,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i32( + undef, * %0, %1, i32 %2) @@ 
-417,6 +435,7 @@ } declare @llvm.riscv.vloxei.nxv16i16.nxv16i32( + , *, , i32); @@ -430,6 +449,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i32( + undef, * %0, %1, i32 %2) @@ -463,6 +483,7 @@ } declare @llvm.riscv.vloxei.nxv1i32.nxv1i32( + , *, , i32); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i32( + undef, * %0, %1, i32 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vloxei.nxv2i32.nxv2i32( + , *, , i32); @@ -520,6 +543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i32( + undef, * %0, %1, i32 %2) @@ -553,6 +577,7 @@ } declare @llvm.riscv.vloxei.nxv4i32.nxv4i32( + , *, , i32); @@ -565,6 +590,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i32( + undef, * %0, %1, i32 %2) @@ -598,6 +624,7 @@ } declare @llvm.riscv.vloxei.nxv8i32.nxv8i32( + , *, , i32); @@ -610,6 +637,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i32( + undef, * %0, %1, i32 %2) @@ -643,6 +671,7 @@ } declare @llvm.riscv.vloxei.nxv16i32.nxv16i32( + , *, , i32); @@ -655,6 +684,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i32( + undef, * %0, %1, i32 %2) @@ -688,6 +718,7 @@ } declare @llvm.riscv.vloxei.nxv1i64.nxv1i32( + , *, , i32); @@ -701,6 +732,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i32( + undef, * %0, %1, i32 %2) @@ -734,6 +766,7 @@ } declare @llvm.riscv.vloxei.nxv2i64.nxv2i32( + , *, , i32); @@ -747,6 +780,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i32( + undef, * %0, %1, i32 %2) @@ -780,6 +814,7 @@ } declare @llvm.riscv.vloxei.nxv4i64.nxv4i32( + , *, , i32); @@ -793,6 +828,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i32( + undef, * %0, %1, i32 %2) @@ -826,6 +862,7 @@ } declare @llvm.riscv.vloxei.nxv8i64.nxv8i32( + , *, , i32); @@ -839,6 +876,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i32( + undef, * %0, %1, i32 %2) @@ -872,6 +910,7 @@ } declare @llvm.riscv.vloxei.nxv1f16.nxv1i32( + , *, , i32); @@ -885,6 +924,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i32( + undef, * %0, %1, i32 %2) @@ -918,6 +958,7 @@ } declare @llvm.riscv.vloxei.nxv2f16.nxv2i32( + , *, , i32); @@ -931,6 +972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i32( + undef, * %0, %1, i32 %2) @@ -964,6 +1006,7 @@ } declare @llvm.riscv.vloxei.nxv4f16.nxv4i32( + , *, , i32); @@ -977,6 +1020,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i32( + undef, * %0, %1, i32 %2) @@ -1010,6 +1054,7 @@ } declare @llvm.riscv.vloxei.nxv8f16.nxv8i32( + , *, , i32); @@ -1023,6 +1068,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i32( + undef, * %0, %1, i32 %2) @@ -1056,6 +1102,7 @@ } declare @llvm.riscv.vloxei.nxv16f16.nxv16i32( + , *, , i32); @@ -1069,6 +1116,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i32( + undef, * %0, %1, i32 %2) @@ -1102,6 +1150,7 @@ } declare @llvm.riscv.vloxei.nxv1f32.nxv1i32( + , *, , i32); @@ -1114,6 +1163,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i32( + undef, * %0, %1, i32 %2) @@ -1147,6 +1197,7 @@ } declare @llvm.riscv.vloxei.nxv2f32.nxv2i32( + , *, , i32); @@ -1159,6 +1210,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i32( + undef, * %0, %1, i32 %2) @@ -1192,6 +1244,7 @@ } declare @llvm.riscv.vloxei.nxv4f32.nxv4i32( + , *, , i32); @@ -1204,6 +1257,7 @@ ; CHECK-NEXT: ret entry: %a = call 
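
For the indexed forms (vloxei/vluxei) the operand order after this patch is passthru, base pointer, index vector, vl. A sketch of the unmasked ordered-indexed case with a live passthru, mirroring intrinsic_vloxei_v_tu_nxv1i8_nxv1i8 in unmasked-tu.ll; the function name and the two-type suffix follow the convention of these test files and are illustrative:

; Sketch only: with a non-undef passthru the TU indexed pseudo is selected
; (vsetvli ..., tu, mu followed by vloxei8.v v8, (a0), v9 in the test).
declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>*, <vscale x 1 x i8>, i64)

define <vscale x 1 x i8> @vloxei_tu(<vscale x 1 x i8> %merge, <vscale x 1 x i8>* %p, <vscale x 1 x i8> %idx, i64 %vl) {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(<vscale x 1 x i8> %merge, <vscale x 1 x i8>* %p, <vscale x 1 x i8> %idx, i64 %vl)
  ret <vscale x 1 x i8> %a
}
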
@llvm.riscv.vloxei.nxv4f32.nxv4i32( + undef, * %0, %1, i32 %2) @@ -1237,6 +1291,7 @@ } declare @llvm.riscv.vloxei.nxv8f32.nxv8i32( + , *, , i32); @@ -1249,6 +1304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i32( + undef, * %0, %1, i32 %2) @@ -1282,6 +1338,7 @@ } declare @llvm.riscv.vloxei.nxv16f32.nxv16i32( + , *, , i32); @@ -1294,6 +1351,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i32( + undef, * %0, %1, i32 %2) @@ -1327,6 +1385,7 @@ } declare @llvm.riscv.vloxei.nxv1f64.nxv1i32( + , *, , i32); @@ -1340,6 +1399,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i32( + undef, * %0, %1, i32 %2) @@ -1373,6 +1433,7 @@ } declare @llvm.riscv.vloxei.nxv2f64.nxv2i32( + , *, , i32); @@ -1386,6 +1447,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i32( + undef, * %0, %1, i32 %2) @@ -1419,6 +1481,7 @@ } declare @llvm.riscv.vloxei.nxv4f64.nxv4i32( + , *, , i32); @@ -1432,6 +1495,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i32( + undef, * %0, %1, i32 %2) @@ -1465,6 +1529,7 @@ } declare @llvm.riscv.vloxei.nxv8f64.nxv8i32( + , *, , i32); @@ -1478,6 +1543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i32( + undef, * %0, %1, i32 %2) @@ -1511,6 +1577,7 @@ } declare @llvm.riscv.vloxei.nxv1i8.nxv1i16( + , *, , i32); @@ -1524,6 +1591,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i16( + undef, * %0, %1, i32 %2) @@ -1557,6 +1625,7 @@ } declare @llvm.riscv.vloxei.nxv2i8.nxv2i16( + , *, , i32); @@ -1570,6 +1639,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i16( + undef, * %0, %1, i32 %2) @@ -1603,6 +1673,7 @@ } declare @llvm.riscv.vloxei.nxv4i8.nxv4i16( + , *, , i32); @@ -1616,6 +1687,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i16( + undef, * %0, %1, i32 %2) @@ -1649,6 +1721,7 @@ } declare @llvm.riscv.vloxei.nxv8i8.nxv8i16( + , *, , i32); @@ -1662,6 +1735,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i16( + undef, * %0, %1, i32 %2) @@ -1695,6 +1769,7 @@ } declare @llvm.riscv.vloxei.nxv16i8.nxv16i16( + , *, , i32); @@ -1708,6 +1783,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i16( + undef, * %0, %1, i32 %2) @@ -1741,6 +1817,7 @@ } declare @llvm.riscv.vloxei.nxv32i8.nxv32i16( + , *, , i32); @@ -1754,6 +1831,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i16( + undef, * %0, %1, i32 %2) @@ -1787,6 +1865,7 @@ } declare @llvm.riscv.vloxei.nxv1i16.nxv1i16( + , *, , i32); @@ -1799,6 +1878,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i16( + undef, * %0, %1, i32 %2) @@ -1832,6 +1912,7 @@ } declare @llvm.riscv.vloxei.nxv2i16.nxv2i16( + , *, , i32); @@ -1844,6 +1925,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i16( + undef, * %0, %1, i32 %2) @@ -1877,6 +1959,7 @@ } declare @llvm.riscv.vloxei.nxv4i16.nxv4i16( + , *, , i32); @@ -1889,6 +1972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i16( + undef, * %0, %1, i32 %2) @@ -1922,6 +2006,7 @@ } declare @llvm.riscv.vloxei.nxv8i16.nxv8i16( + , *, , i32); @@ -1934,6 +2019,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i16( + undef, * %0, %1, i32 %2) @@ -1967,6 +2053,7 @@ } declare @llvm.riscv.vloxei.nxv16i16.nxv16i16( + , *, , i32); @@ -1979,6 +2066,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i16( + undef, * %0, %1, i32 %2) @@ -2012,6 +2100,7 @@ } declare 
@llvm.riscv.vloxei.nxv32i16.nxv32i16( + , *, , i32); @@ -2024,6 +2113,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i16( + undef, * %0, %1, i32 %2) @@ -2057,6 +2147,7 @@ } declare @llvm.riscv.vloxei.nxv1i32.nxv1i16( + , *, , i32); @@ -2070,6 +2161,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i16( + undef, * %0, %1, i32 %2) @@ -2103,6 +2195,7 @@ } declare @llvm.riscv.vloxei.nxv2i32.nxv2i16( + , *, , i32); @@ -2116,6 +2209,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i16( + undef, * %0, %1, i32 %2) @@ -2149,6 +2243,7 @@ } declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( + , *, , i32); @@ -2162,6 +2257,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i16( + undef, * %0, %1, i32 %2) @@ -2195,6 +2291,7 @@ } declare @llvm.riscv.vloxei.nxv8i32.nxv8i16( + , *, , i32); @@ -2208,6 +2305,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i16( + undef, * %0, %1, i32 %2) @@ -2241,6 +2339,7 @@ } declare @llvm.riscv.vloxei.nxv16i32.nxv16i16( + , *, , i32); @@ -2254,6 +2353,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i16( + undef, * %0, %1, i32 %2) @@ -2287,6 +2387,7 @@ } declare @llvm.riscv.vloxei.nxv1i64.nxv1i16( + , *, , i32); @@ -2300,6 +2401,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i16( + undef, * %0, %1, i32 %2) @@ -2333,6 +2435,7 @@ } declare @llvm.riscv.vloxei.nxv2i64.nxv2i16( + , *, , i32); @@ -2346,6 +2449,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i16( + undef, * %0, %1, i32 %2) @@ -2379,6 +2483,7 @@ } declare @llvm.riscv.vloxei.nxv4i64.nxv4i16( + , *, , i32); @@ -2392,6 +2497,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i16( + undef, * %0, %1, i32 %2) @@ -2425,6 +2531,7 @@ } declare @llvm.riscv.vloxei.nxv8i64.nxv8i16( + , *, , i32); @@ -2438,6 +2545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i16( + undef, * %0, %1, i32 %2) @@ -2471,6 +2579,7 @@ } declare @llvm.riscv.vloxei.nxv1f16.nxv1i16( + , *, , i32); @@ -2483,6 +2592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i16( + undef, * %0, %1, i32 %2) @@ -2516,6 +2626,7 @@ } declare @llvm.riscv.vloxei.nxv2f16.nxv2i16( + , *, , i32); @@ -2528,6 +2639,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i16( + undef, * %0, %1, i32 %2) @@ -2561,6 +2673,7 @@ } declare @llvm.riscv.vloxei.nxv4f16.nxv4i16( + , *, , i32); @@ -2573,6 +2686,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i16( + undef, * %0, %1, i32 %2) @@ -2606,6 +2720,7 @@ } declare @llvm.riscv.vloxei.nxv8f16.nxv8i16( + , *, , i32); @@ -2618,6 +2733,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i16( + undef, * %0, %1, i32 %2) @@ -2651,6 +2767,7 @@ } declare @llvm.riscv.vloxei.nxv16f16.nxv16i16( + , *, , i32); @@ -2663,6 +2780,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i16( + undef, * %0, %1, i32 %2) @@ -2696,6 +2814,7 @@ } declare @llvm.riscv.vloxei.nxv32f16.nxv32i16( + , *, , i32); @@ -2708,6 +2827,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i16( + undef, * %0, %1, i32 %2) @@ -2741,6 +2861,7 @@ } declare @llvm.riscv.vloxei.nxv1f32.nxv1i16( + , *, , i32); @@ -2754,6 +2875,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i16( + undef, * %0, %1, i32 %2) @@ -2787,6 +2909,7 @@ } declare @llvm.riscv.vloxei.nxv2f32.nxv2i16( + , *, , i32); @@ -2800,6 +2923,7 @@ ; CHECK-NEXT: 
ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i16( + undef, * %0, %1, i32 %2) @@ -2833,6 +2957,7 @@ } declare @llvm.riscv.vloxei.nxv4f32.nxv4i16( + , *, , i32); @@ -2846,6 +2971,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i16( + undef, * %0, %1, i32 %2) @@ -2879,6 +3005,7 @@ } declare @llvm.riscv.vloxei.nxv8f32.nxv8i16( + , *, , i32); @@ -2892,6 +3019,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i16( + undef, * %0, %1, i32 %2) @@ -2925,6 +3053,7 @@ } declare @llvm.riscv.vloxei.nxv16f32.nxv16i16( + , *, , i32); @@ -2938,6 +3067,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i16( + undef, * %0, %1, i32 %2) @@ -2971,6 +3101,7 @@ } declare @llvm.riscv.vloxei.nxv1f64.nxv1i16( + , *, , i32); @@ -2984,6 +3115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i16( + undef, * %0, %1, i32 %2) @@ -3017,6 +3149,7 @@ } declare @llvm.riscv.vloxei.nxv2f64.nxv2i16( + , *, , i32); @@ -3030,6 +3163,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i16( + undef, * %0, %1, i32 %2) @@ -3063,6 +3197,7 @@ } declare @llvm.riscv.vloxei.nxv4f64.nxv4i16( + , *, , i32); @@ -3076,6 +3211,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i16( + undef, * %0, %1, i32 %2) @@ -3109,6 +3245,7 @@ } declare @llvm.riscv.vloxei.nxv8f64.nxv8i16( + , *, , i32); @@ -3122,6 +3259,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i16( + undef, * %0, %1, i32 %2) @@ -3155,6 +3293,7 @@ } declare @llvm.riscv.vloxei.nxv1i8.nxv1i8( + , *, , i32); @@ -3167,6 +3306,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i8( + undef, * %0, %1, i32 %2) @@ -3200,6 +3340,7 @@ } declare @llvm.riscv.vloxei.nxv2i8.nxv2i8( + , *, , i32); @@ -3212,6 +3353,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i8( + undef, * %0, %1, i32 %2) @@ -3245,6 +3387,7 @@ } declare @llvm.riscv.vloxei.nxv4i8.nxv4i8( + , *, , i32); @@ -3257,6 +3400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i8( + undef, * %0, %1, i32 %2) @@ -3290,6 +3434,7 @@ } declare @llvm.riscv.vloxei.nxv8i8.nxv8i8( + , *, , i32); @@ -3302,6 +3447,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i8( + undef, * %0, %1, i32 %2) @@ -3335,6 +3481,7 @@ } declare @llvm.riscv.vloxei.nxv16i8.nxv16i8( + , *, , i32); @@ -3347,6 +3494,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i8( + undef, * %0, %1, i32 %2) @@ -3380,6 +3528,7 @@ } declare @llvm.riscv.vloxei.nxv32i8.nxv32i8( + , *, , i32); @@ -3392,6 +3541,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i8( + undef, * %0, %1, i32 %2) @@ -3425,6 +3575,7 @@ } declare @llvm.riscv.vloxei.nxv64i8.nxv64i8( + , *, , i32); @@ -3437,6 +3588,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv64i8.nxv64i8( + undef, * %0, %1, i32 %2) @@ -3470,6 +3622,7 @@ } declare @llvm.riscv.vloxei.nxv1i16.nxv1i8( + , *, , i32); @@ -3483,6 +3636,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i8( + undef, * %0, %1, i32 %2) @@ -3516,6 +3670,7 @@ } declare @llvm.riscv.vloxei.nxv2i16.nxv2i8( + , *, , i32); @@ -3529,6 +3684,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i8( + undef, * %0, %1, i32 %2) @@ -3562,6 +3718,7 @@ } declare @llvm.riscv.vloxei.nxv4i16.nxv4i8( + , *, , i32); @@ -3575,6 +3732,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i8( + undef, * %0, %1, i32 %2) @@ -3608,6 +3766,7 @@ } declare 
@llvm.riscv.vloxei.nxv8i16.nxv8i8( + , *, , i32); @@ -3621,6 +3780,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i8( + undef, * %0, %1, i32 %2) @@ -3654,6 +3814,7 @@ } declare @llvm.riscv.vloxei.nxv16i16.nxv16i8( + , *, , i32); @@ -3667,6 +3828,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i8( + undef, * %0, %1, i32 %2) @@ -3700,6 +3862,7 @@ } declare @llvm.riscv.vloxei.nxv32i16.nxv32i8( + , *, , i32); @@ -3713,6 +3876,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i8( + undef, * %0, %1, i32 %2) @@ -3746,6 +3910,7 @@ } declare @llvm.riscv.vloxei.nxv1i32.nxv1i8( + , *, , i32); @@ -3759,6 +3924,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i8( + undef, * %0, %1, i32 %2) @@ -3792,6 +3958,7 @@ } declare @llvm.riscv.vloxei.nxv2i32.nxv2i8( + , *, , i32); @@ -3805,6 +3972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i8( + undef, * %0, %1, i32 %2) @@ -3838,6 +4006,7 @@ } declare @llvm.riscv.vloxei.nxv4i32.nxv4i8( + , *, , i32); @@ -3851,6 +4020,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i8( + undef, * %0, %1, i32 %2) @@ -3884,6 +4054,7 @@ } declare @llvm.riscv.vloxei.nxv8i32.nxv8i8( + , *, , i32); @@ -3897,6 +4068,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i8( + undef, * %0, %1, i32 %2) @@ -3930,6 +4102,7 @@ } declare @llvm.riscv.vloxei.nxv16i32.nxv16i8( + , *, , i32); @@ -3943,6 +4116,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i8( + undef, * %0, %1, i32 %2) @@ -3976,6 +4150,7 @@ } declare @llvm.riscv.vloxei.nxv1i64.nxv1i8( + , *, , i32); @@ -3989,6 +4164,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i8( + undef, * %0, %1, i32 %2) @@ -4022,6 +4198,7 @@ } declare @llvm.riscv.vloxei.nxv2i64.nxv2i8( + , *, , i32); @@ -4035,6 +4212,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i8( + undef, * %0, %1, i32 %2) @@ -4068,6 +4246,7 @@ } declare @llvm.riscv.vloxei.nxv4i64.nxv4i8( + , *, , i32); @@ -4081,6 +4260,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i8( + undef, * %0, %1, i32 %2) @@ -4114,6 +4294,7 @@ } declare @llvm.riscv.vloxei.nxv8i64.nxv8i8( + , *, , i32); @@ -4127,6 +4308,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i8( + undef, * %0, %1, i32 %2) @@ -4160,6 +4342,7 @@ } declare @llvm.riscv.vloxei.nxv1f16.nxv1i8( + , *, , i32); @@ -4173,6 +4356,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i8( + undef, * %0, %1, i32 %2) @@ -4206,6 +4390,7 @@ } declare @llvm.riscv.vloxei.nxv2f16.nxv2i8( + , *, , i32); @@ -4219,6 +4404,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i8( + undef, * %0, %1, i32 %2) @@ -4252,6 +4438,7 @@ } declare @llvm.riscv.vloxei.nxv4f16.nxv4i8( + , *, , i32); @@ -4265,6 +4452,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i8( + undef, * %0, %1, i32 %2) @@ -4298,6 +4486,7 @@ } declare @llvm.riscv.vloxei.nxv8f16.nxv8i8( + , *, , i32); @@ -4311,6 +4500,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i8( + undef, * %0, %1, i32 %2) @@ -4344,6 +4534,7 @@ } declare @llvm.riscv.vloxei.nxv16f16.nxv16i8( + , *, , i32); @@ -4357,6 +4548,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i8( + undef, * %0, %1, i32 %2) @@ -4390,6 +4582,7 @@ } declare @llvm.riscv.vloxei.nxv32f16.nxv32i8( + , *, , i32); @@ -4403,6 +4596,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vloxei.nxv32f16.nxv32i8( + undef, * %0, %1, i32 %2) @@ -4436,6 +4630,7 @@ } declare @llvm.riscv.vloxei.nxv1f32.nxv1i8( + , *, , i32); @@ -4449,6 +4644,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i8( + undef, * %0, %1, i32 %2) @@ -4482,6 +4678,7 @@ } declare @llvm.riscv.vloxei.nxv2f32.nxv2i8( + , *, , i32); @@ -4495,6 +4692,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i8( + undef, * %0, %1, i32 %2) @@ -4528,6 +4726,7 @@ } declare @llvm.riscv.vloxei.nxv4f32.nxv4i8( + , *, , i32); @@ -4541,6 +4740,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i8( + undef, * %0, %1, i32 %2) @@ -4574,6 +4774,7 @@ } declare @llvm.riscv.vloxei.nxv8f32.nxv8i8( + , *, , i32); @@ -4587,6 +4788,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i8( + undef, * %0, %1, i32 %2) @@ -4620,6 +4822,7 @@ } declare @llvm.riscv.vloxei.nxv16f32.nxv16i8( + , *, , i32); @@ -4633,6 +4836,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i8( + undef, * %0, %1, i32 %2) @@ -4666,6 +4870,7 @@ } declare @llvm.riscv.vloxei.nxv1f64.nxv1i8( + , *, , i32); @@ -4679,6 +4884,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i8( + undef, * %0, %1, i32 %2) @@ -4712,6 +4918,7 @@ } declare @llvm.riscv.vloxei.nxv2f64.nxv2i8( + , *, , i32); @@ -4725,6 +4932,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i8( + undef, * %0, %1, i32 %2) @@ -4758,6 +4966,7 @@ } declare @llvm.riscv.vloxei.nxv4f64.nxv4i8( + , *, , i32); @@ -4771,6 +4980,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i8( + undef, * %0, %1, i32 %2) @@ -4804,6 +5014,7 @@ } declare @llvm.riscv.vloxei.nxv8f64.nxv8i8( + , *, , i32); @@ -4817,6 +5028,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i8( + undef, * %0, %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+f,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vloxei.nxv1i8.nxv1i64( + , *, , i64); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i64( + undef, * %0, %1, i64 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vloxei.nxv2i8.nxv2i64( + , *, , i64); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i64( + undef, * %0, %1, i64 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( + , *, , i64); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i64( + undef, * %0, %1, i64 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( + , *, , i64); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( + undef, * %0, %1, i64 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( + , *, , i64); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i64( + undef, * %0, %1, i64 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( + , *, , i64); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i64( + undef, * %0, %1, i64 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( + , *, , i64); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( + undef, * %0, %1, i64 %2) @@ 
-324,6 +338,7 @@ } declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( + , *, , i64); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( + undef, * %0, %1, i64 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( + , *, , i64); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i64( + undef, * %0, %1, i64 %2) @@ -416,6 +434,7 @@ } declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( + , *, , i64); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i64( + undef, * %0, %1, i64 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( + , *, , i64); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( + undef, * %0, %1, i64 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( + , *, , i64); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( + undef, * %0, %1, i64 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( + , *, , i64); @@ -566,6 +591,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i64( + undef, * %0, %1, i64 %2) @@ -599,6 +625,7 @@ } declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( + , *, , i64); @@ -611,6 +638,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i64( + undef, * %0, %1, i64 %2) @@ -644,6 +672,7 @@ } declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( + , *, , i64); @@ -656,6 +685,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i64( + undef, * %0, %1, i64 %2) @@ -689,6 +719,7 @@ } declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( + , *, , i64); @@ -701,6 +732,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i64( + undef, * %0, %1, i64 %2) @@ -734,6 +766,7 @@ } declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( + , *, , i64); @@ -747,6 +780,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i64( + undef, * %0, %1, i64 %2) @@ -780,6 +814,7 @@ } declare @llvm.riscv.vloxei.nxv2f16.nxv2i64( + , *, , i64); @@ -793,6 +828,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i64( + undef, * %0, %1, i64 %2) @@ -826,6 +862,7 @@ } declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( + , *, , i64); @@ -839,6 +876,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( + undef, * %0, %1, i64 %2) @@ -872,6 +910,7 @@ } declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( + , *, , i64); @@ -885,6 +924,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( + undef, * %0, %1, i64 %2) @@ -918,6 +958,7 @@ } declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( + , *, , i64); @@ -931,6 +972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i64( + undef, * %0, %1, i64 %2) @@ -964,6 +1006,7 @@ } declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( + , *, , i64); @@ -977,6 +1020,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( + undef, * %0, %1, i64 %2) @@ -1010,6 +1054,7 @@ } declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( + , *, , i64); @@ -1023,6 +1068,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i64( + undef, * %0, %1, i64 %2) @@ -1056,6 +1102,7 @@ } declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( + , *, , i64); @@ -1069,6 +1116,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i64( + undef, * %0, %1, i64 %2) @@ -1102,6 +1150,7 @@ } declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( + , *, , i64); @@ -1114,6 +1163,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vloxei.nxv1f64.nxv1i64( + undef, * %0, %1, i64 %2) @@ -1147,6 +1197,7 @@ } declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( + , *, , i64); @@ -1159,6 +1210,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i64( + undef, * %0, %1, i64 %2) @@ -1192,6 +1244,7 @@ } declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( + , *, , i64); @@ -1204,6 +1257,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i64( + undef, * %0, %1, i64 %2) @@ -1237,6 +1291,7 @@ } declare @llvm.riscv.vloxei.nxv8f64.nxv8i64( + , *, , i64); @@ -1249,6 +1304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i64( + undef, * %0, %1, i64 %2) @@ -1282,6 +1338,7 @@ } declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( + , *, , i64); @@ -1295,6 +1352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i32( + undef, * %0, %1, i64 %2) @@ -1328,6 +1386,7 @@ } declare @llvm.riscv.vloxei.nxv2i8.nxv2i32( + , *, , i64); @@ -1341,6 +1400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i32( + undef, * %0, %1, i64 %2) @@ -1374,6 +1434,7 @@ } declare @llvm.riscv.vloxei.nxv4i8.nxv4i32( + , *, , i64); @@ -1387,6 +1448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i32( + undef, * %0, %1, i64 %2) @@ -1420,6 +1482,7 @@ } declare @llvm.riscv.vloxei.nxv8i8.nxv8i32( + , *, , i64); @@ -1433,6 +1496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i32( + undef, * %0, %1, i64 %2) @@ -1466,6 +1530,7 @@ } declare @llvm.riscv.vloxei.nxv16i8.nxv16i32( + , *, , i64); @@ -1479,6 +1544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i32( + undef, * %0, %1, i64 %2) @@ -1512,6 +1578,7 @@ } declare @llvm.riscv.vloxei.nxv1i16.nxv1i32( + , *, , i64); @@ -1525,6 +1592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i32( + undef, * %0, %1, i64 %2) @@ -1558,6 +1626,7 @@ } declare @llvm.riscv.vloxei.nxv2i16.nxv2i32( + , *, , i64); @@ -1571,6 +1640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i32( + undef, * %0, %1, i64 %2) @@ -1604,6 +1674,7 @@ } declare @llvm.riscv.vloxei.nxv4i16.nxv4i32( + , *, , i64); @@ -1617,6 +1688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i32( + undef, * %0, %1, i64 %2) @@ -1650,6 +1722,7 @@ } declare @llvm.riscv.vloxei.nxv8i16.nxv8i32( + , *, , i64); @@ -1663,6 +1736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i32( + undef, * %0, %1, i64 %2) @@ -1696,6 +1770,7 @@ } declare @llvm.riscv.vloxei.nxv16i16.nxv16i32( + , *, , i64); @@ -1709,6 +1784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i32( + undef, * %0, %1, i64 %2) @@ -1742,6 +1818,7 @@ } declare @llvm.riscv.vloxei.nxv1i32.nxv1i32( + , *, , i64); @@ -1754,6 +1831,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i32( + undef, * %0, %1, i64 %2) @@ -1787,6 +1865,7 @@ } declare @llvm.riscv.vloxei.nxv2i32.nxv2i32( + , *, , i64); @@ -1799,6 +1878,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i32( + undef, * %0, %1, i64 %2) @@ -1832,6 +1912,7 @@ } declare @llvm.riscv.vloxei.nxv4i32.nxv4i32( + , *, , i64); @@ -1844,6 +1925,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i32( + undef, * %0, %1, i64 %2) @@ -1877,6 +1959,7 @@ } declare @llvm.riscv.vloxei.nxv8i32.nxv8i32( + , *, , i64); @@ -1889,6 +1972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i32( + undef, * %0, %1, i64 %2) @@ -1922,6 +2006,7 @@ } declare 
@llvm.riscv.vloxei.nxv16i32.nxv16i32( + , *, , i64); @@ -1934,6 +2019,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i32( + undef, * %0, %1, i64 %2) @@ -1967,6 +2053,7 @@ } declare @llvm.riscv.vloxei.nxv1i64.nxv1i32( + , *, , i64); @@ -1980,6 +2067,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i32( + undef, * %0, %1, i64 %2) @@ -2013,6 +2101,7 @@ } declare @llvm.riscv.vloxei.nxv2i64.nxv2i32( + , *, , i64); @@ -2026,6 +2115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i32( + undef, * %0, %1, i64 %2) @@ -2059,6 +2149,7 @@ } declare @llvm.riscv.vloxei.nxv4i64.nxv4i32( + , *, , i64); @@ -2072,6 +2163,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i32( + undef, * %0, %1, i64 %2) @@ -2105,6 +2197,7 @@ } declare @llvm.riscv.vloxei.nxv8i64.nxv8i32( + , *, , i64); @@ -2118,6 +2211,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i32( + undef, * %0, %1, i64 %2) @@ -2151,6 +2245,7 @@ } declare @llvm.riscv.vloxei.nxv1f16.nxv1i32( + , *, , i64); @@ -2164,6 +2259,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i32( + undef, * %0, %1, i64 %2) @@ -2197,6 +2293,7 @@ } declare @llvm.riscv.vloxei.nxv2f16.nxv2i32( + , *, , i64); @@ -2210,6 +2307,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i32( + undef, * %0, %1, i64 %2) @@ -2243,6 +2341,7 @@ } declare @llvm.riscv.vloxei.nxv4f16.nxv4i32( + , *, , i64); @@ -2256,6 +2355,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i32( + undef, * %0, %1, i64 %2) @@ -2289,6 +2389,7 @@ } declare @llvm.riscv.vloxei.nxv8f16.nxv8i32( + , *, , i64); @@ -2302,6 +2403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i32( + undef, * %0, %1, i64 %2) @@ -2335,6 +2437,7 @@ } declare @llvm.riscv.vloxei.nxv16f16.nxv16i32( + , *, , i64); @@ -2348,6 +2451,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i32( + undef, * %0, %1, i64 %2) @@ -2381,6 +2485,7 @@ } declare @llvm.riscv.vloxei.nxv1f32.nxv1i32( + , *, , i64); @@ -2393,6 +2498,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i32( + undef, * %0, %1, i64 %2) @@ -2426,6 +2532,7 @@ } declare @llvm.riscv.vloxei.nxv2f32.nxv2i32( + , *, , i64); @@ -2438,6 +2545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i32( + undef, * %0, %1, i64 %2) @@ -2471,6 +2579,7 @@ } declare @llvm.riscv.vloxei.nxv4f32.nxv4i32( + , *, , i64); @@ -2483,6 +2592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i32( + undef, * %0, %1, i64 %2) @@ -2516,6 +2626,7 @@ } declare @llvm.riscv.vloxei.nxv8f32.nxv8i32( + , *, , i64); @@ -2528,6 +2639,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i32( + undef, * %0, %1, i64 %2) @@ -2561,6 +2673,7 @@ } declare @llvm.riscv.vloxei.nxv16f32.nxv16i32( + , *, , i64); @@ -2573,6 +2686,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i32( + undef, * %0, %1, i64 %2) @@ -2606,6 +2720,7 @@ } declare @llvm.riscv.vloxei.nxv1f64.nxv1i32( + , *, , i64); @@ -2619,6 +2734,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i32( + undef, * %0, %1, i64 %2) @@ -2652,6 +2768,7 @@ } declare @llvm.riscv.vloxei.nxv2f64.nxv2i32( + , *, , i64); @@ -2665,6 +2782,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i32( + undef, * %0, %1, i64 %2) @@ -2698,6 +2816,7 @@ } declare @llvm.riscv.vloxei.nxv4f64.nxv4i32( + , *, , i64); @@ -2711,6 +2830,7 @@ ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i32( + undef, * %0, %1, i64 %2) @@ -2744,6 +2864,7 @@ } declare @llvm.riscv.vloxei.nxv8f64.nxv8i32( + , *, , i64); @@ -2757,6 +2878,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i32( + undef, * %0, %1, i64 %2) @@ -2790,6 +2912,7 @@ } declare @llvm.riscv.vloxei.nxv1i8.nxv1i16( + , *, , i64); @@ -2803,6 +2926,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i16( + undef, * %0, %1, i64 %2) @@ -2836,6 +2960,7 @@ } declare @llvm.riscv.vloxei.nxv2i8.nxv2i16( + , *, , i64); @@ -2849,6 +2974,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i16( + undef, * %0, %1, i64 %2) @@ -2882,6 +3008,7 @@ } declare @llvm.riscv.vloxei.nxv4i8.nxv4i16( + , *, , i64); @@ -2895,6 +3022,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i16( + undef, * %0, %1, i64 %2) @@ -2928,6 +3056,7 @@ } declare @llvm.riscv.vloxei.nxv8i8.nxv8i16( + , *, , i64); @@ -2941,6 +3070,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i16( + undef, * %0, %1, i64 %2) @@ -2974,6 +3104,7 @@ } declare @llvm.riscv.vloxei.nxv16i8.nxv16i16( + , *, , i64); @@ -2987,6 +3118,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i16( + undef, * %0, %1, i64 %2) @@ -3020,6 +3152,7 @@ } declare @llvm.riscv.vloxei.nxv32i8.nxv32i16( + , *, , i64); @@ -3033,6 +3166,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i16( + undef, * %0, %1, i64 %2) @@ -3066,6 +3200,7 @@ } declare @llvm.riscv.vloxei.nxv1i16.nxv1i16( + , *, , i64); @@ -3078,6 +3213,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i16( + undef, * %0, %1, i64 %2) @@ -3111,6 +3247,7 @@ } declare @llvm.riscv.vloxei.nxv2i16.nxv2i16( + , *, , i64); @@ -3123,6 +3260,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i16( + undef, * %0, %1, i64 %2) @@ -3156,6 +3294,7 @@ } declare @llvm.riscv.vloxei.nxv4i16.nxv4i16( + , *, , i64); @@ -3168,6 +3307,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i16( + undef, * %0, %1, i64 %2) @@ -3201,6 +3341,7 @@ } declare @llvm.riscv.vloxei.nxv8i16.nxv8i16( + , *, , i64); @@ -3213,6 +3354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i16( + undef, * %0, %1, i64 %2) @@ -3246,6 +3388,7 @@ } declare @llvm.riscv.vloxei.nxv16i16.nxv16i16( + , *, , i64); @@ -3258,6 +3401,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i16( + undef, * %0, %1, i64 %2) @@ -3291,6 +3435,7 @@ } declare @llvm.riscv.vloxei.nxv32i16.nxv32i16( + , *, , i64); @@ -3303,6 +3448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i16( + undef, * %0, %1, i64 %2) @@ -3336,6 +3482,7 @@ } declare @llvm.riscv.vloxei.nxv1i32.nxv1i16( + , *, , i64); @@ -3349,6 +3496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i16( + undef, * %0, %1, i64 %2) @@ -3382,6 +3530,7 @@ } declare @llvm.riscv.vloxei.nxv2i32.nxv2i16( + , *, , i64); @@ -3395,6 +3544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i16( + undef, * %0, %1, i64 %2) @@ -3428,6 +3578,7 @@ } declare @llvm.riscv.vloxei.nxv4i32.nxv4i16( + , *, , i64); @@ -3441,6 +3592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i16( + undef, * %0, %1, i64 %2) @@ -3474,6 +3626,7 @@ } declare @llvm.riscv.vloxei.nxv8i32.nxv8i16( + , *, , i64); @@ -3487,6 +3640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i16( + undef, * %0, %1, i64 %2) @@ -3520,6 +3674,7 
@@ } declare @llvm.riscv.vloxei.nxv16i32.nxv16i16( + , *, , i64); @@ -3533,6 +3688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i16( + undef, * %0, %1, i64 %2) @@ -3566,6 +3722,7 @@ } declare @llvm.riscv.vloxei.nxv1i64.nxv1i16( + , *, , i64); @@ -3579,6 +3736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i16( + undef, * %0, %1, i64 %2) @@ -3612,6 +3770,7 @@ } declare @llvm.riscv.vloxei.nxv2i64.nxv2i16( + , *, , i64); @@ -3625,6 +3784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i16( + undef, * %0, %1, i64 %2) @@ -3658,6 +3818,7 @@ } declare @llvm.riscv.vloxei.nxv4i64.nxv4i16( + , *, , i64); @@ -3671,6 +3832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i16( + undef, * %0, %1, i64 %2) @@ -3704,6 +3866,7 @@ } declare @llvm.riscv.vloxei.nxv8i64.nxv8i16( + , *, , i64); @@ -3717,6 +3880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i16( + undef, * %0, %1, i64 %2) @@ -3750,6 +3914,7 @@ } declare @llvm.riscv.vloxei.nxv1f16.nxv1i16( + , *, , i64); @@ -3762,6 +3927,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i16( + undef, * %0, %1, i64 %2) @@ -3795,6 +3961,7 @@ } declare @llvm.riscv.vloxei.nxv2f16.nxv2i16( + , *, , i64); @@ -3807,6 +3974,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i16( + undef, * %0, %1, i64 %2) @@ -3840,6 +4008,7 @@ } declare @llvm.riscv.vloxei.nxv4f16.nxv4i16( + , *, , i64); @@ -3852,6 +4021,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i16( + undef, * %0, %1, i64 %2) @@ -3885,6 +4055,7 @@ } declare @llvm.riscv.vloxei.nxv8f16.nxv8i16( + , *, , i64); @@ -3897,6 +4068,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i16( + undef, * %0, %1, i64 %2) @@ -3930,6 +4102,7 @@ } declare @llvm.riscv.vloxei.nxv16f16.nxv16i16( + , *, , i64); @@ -3942,6 +4115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i16( + undef, * %0, %1, i64 %2) @@ -3975,6 +4149,7 @@ } declare @llvm.riscv.vloxei.nxv32f16.nxv32i16( + , *, , i64); @@ -3987,6 +4162,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i16( + undef, * %0, %1, i64 %2) @@ -4020,6 +4196,7 @@ } declare @llvm.riscv.vloxei.nxv1f32.nxv1i16( + , *, , i64); @@ -4033,6 +4210,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i16( + undef, * %0, %1, i64 %2) @@ -4066,6 +4244,7 @@ } declare @llvm.riscv.vloxei.nxv2f32.nxv2i16( + , *, , i64); @@ -4079,6 +4258,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i16( + undef, * %0, %1, i64 %2) @@ -4112,6 +4292,7 @@ } declare @llvm.riscv.vloxei.nxv4f32.nxv4i16( + , *, , i64); @@ -4125,6 +4306,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i16( + undef, * %0, %1, i64 %2) @@ -4158,6 +4340,7 @@ } declare @llvm.riscv.vloxei.nxv8f32.nxv8i16( + , *, , i64); @@ -4171,6 +4354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i16( + undef, * %0, %1, i64 %2) @@ -4204,6 +4388,7 @@ } declare @llvm.riscv.vloxei.nxv16f32.nxv16i16( + , *, , i64); @@ -4217,6 +4402,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i16( + undef, * %0, %1, i64 %2) @@ -4250,6 +4436,7 @@ } declare @llvm.riscv.vloxei.nxv1f64.nxv1i16( + , *, , i64); @@ -4263,6 +4450,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i16( + undef, * %0, %1, i64 %2) @@ -4296,6 +4484,7 @@ } declare @llvm.riscv.vloxei.nxv2f64.nxv2i16( + , *, , i64); @@ -4309,6 +4498,7 @@ ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i16( + undef, * %0, %1, i64 %2) @@ -4342,6 +4532,7 @@ } declare @llvm.riscv.vloxei.nxv4f64.nxv4i16( + , *, , i64); @@ -4355,6 +4546,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i16( + undef, * %0, %1, i64 %2) @@ -4388,6 +4580,7 @@ } declare @llvm.riscv.vloxei.nxv8f64.nxv8i16( + , *, , i64); @@ -4401,6 +4594,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i16( + undef, * %0, %1, i64 %2) @@ -4434,6 +4628,7 @@ } declare @llvm.riscv.vloxei.nxv1i8.nxv1i8( + , *, , i64); @@ -4446,6 +4641,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i8( + undef, * %0, %1, i64 %2) @@ -4479,6 +4675,7 @@ } declare @llvm.riscv.vloxei.nxv2i8.nxv2i8( + , *, , i64); @@ -4491,6 +4688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i8( + undef, * %0, %1, i64 %2) @@ -4524,6 +4722,7 @@ } declare @llvm.riscv.vloxei.nxv4i8.nxv4i8( + , *, , i64); @@ -4536,6 +4735,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i8( + undef, * %0, %1, i64 %2) @@ -4569,6 +4769,7 @@ } declare @llvm.riscv.vloxei.nxv8i8.nxv8i8( + , *, , i64); @@ -4581,6 +4782,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i8( + undef, * %0, %1, i64 %2) @@ -4614,6 +4816,7 @@ } declare @llvm.riscv.vloxei.nxv16i8.nxv16i8( + , *, , i64); @@ -4626,6 +4829,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i8( + undef, * %0, %1, i64 %2) @@ -4659,6 +4863,7 @@ } declare @llvm.riscv.vloxei.nxv32i8.nxv32i8( + , *, , i64); @@ -4671,6 +4876,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i8( + undef, * %0, %1, i64 %2) @@ -4704,6 +4910,7 @@ } declare @llvm.riscv.vloxei.nxv64i8.nxv64i8( + , *, , i64); @@ -4716,6 +4923,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv64i8.nxv64i8( + undef, * %0, %1, i64 %2) @@ -4749,6 +4957,7 @@ } declare @llvm.riscv.vloxei.nxv1i16.nxv1i8( + , *, , i64); @@ -4762,6 +4971,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i8( + undef, * %0, %1, i64 %2) @@ -4795,6 +5005,7 @@ } declare @llvm.riscv.vloxei.nxv2i16.nxv2i8( + , *, , i64); @@ -4808,6 +5019,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i8( + undef, * %0, %1, i64 %2) @@ -4841,6 +5053,7 @@ } declare @llvm.riscv.vloxei.nxv4i16.nxv4i8( + , *, , i64); @@ -4854,6 +5067,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i8( + undef, * %0, %1, i64 %2) @@ -4887,6 +5101,7 @@ } declare @llvm.riscv.vloxei.nxv8i16.nxv8i8( + , *, , i64); @@ -4900,6 +5115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i8( + undef, * %0, %1, i64 %2) @@ -4933,6 +5149,7 @@ } declare @llvm.riscv.vloxei.nxv16i16.nxv16i8( + , *, , i64); @@ -4946,6 +5163,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i8( + undef, * %0, %1, i64 %2) @@ -4979,6 +5197,7 @@ } declare @llvm.riscv.vloxei.nxv32i16.nxv32i8( + , *, , i64); @@ -4992,6 +5211,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i8( + undef, * %0, %1, i64 %2) @@ -5025,6 +5245,7 @@ } declare @llvm.riscv.vloxei.nxv1i32.nxv1i8( + , *, , i64); @@ -5038,6 +5259,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i8( + undef, * %0, %1, i64 %2) @@ -5071,6 +5293,7 @@ } declare @llvm.riscv.vloxei.nxv2i32.nxv2i8( + , *, , i64); @@ -5084,6 +5307,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i8( + undef, * %0, %1, i64 %2) @@ -5117,6 +5341,7 @@ } declare 
@llvm.riscv.vloxei.nxv4i32.nxv4i8( + , *, , i64); @@ -5130,6 +5355,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i8( + undef, * %0, %1, i64 %2) @@ -5163,6 +5389,7 @@ } declare @llvm.riscv.vloxei.nxv8i32.nxv8i8( + , *, , i64); @@ -5176,6 +5403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i8( + undef, * %0, %1, i64 %2) @@ -5209,6 +5437,7 @@ } declare @llvm.riscv.vloxei.nxv16i32.nxv16i8( + , *, , i64); @@ -5222,6 +5451,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i8( + undef, * %0, %1, i64 %2) @@ -5255,6 +5485,7 @@ } declare @llvm.riscv.vloxei.nxv1i64.nxv1i8( + , *, , i64); @@ -5268,6 +5499,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i8( + undef, * %0, %1, i64 %2) @@ -5301,6 +5533,7 @@ } declare @llvm.riscv.vloxei.nxv2i64.nxv2i8( + , *, , i64); @@ -5314,6 +5547,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i8( + undef, * %0, %1, i64 %2) @@ -5347,6 +5581,7 @@ } declare @llvm.riscv.vloxei.nxv4i64.nxv4i8( + , *, , i64); @@ -5360,6 +5595,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i8( + undef, * %0, %1, i64 %2) @@ -5393,6 +5629,7 @@ } declare @llvm.riscv.vloxei.nxv8i64.nxv8i8( + , *, , i64); @@ -5406,6 +5643,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i8( + undef, * %0, %1, i64 %2) @@ -5439,6 +5677,7 @@ } declare @llvm.riscv.vloxei.nxv1f16.nxv1i8( + , *, , i64); @@ -5452,6 +5691,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i8( + undef, * %0, %1, i64 %2) @@ -5485,6 +5725,7 @@ } declare @llvm.riscv.vloxei.nxv2f16.nxv2i8( + , *, , i64); @@ -5498,6 +5739,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i8( + undef, * %0, %1, i64 %2) @@ -5531,6 +5773,7 @@ } declare @llvm.riscv.vloxei.nxv4f16.nxv4i8( + , *, , i64); @@ -5544,6 +5787,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i8( + undef, * %0, %1, i64 %2) @@ -5577,6 +5821,7 @@ } declare @llvm.riscv.vloxei.nxv8f16.nxv8i8( + , *, , i64); @@ -5590,6 +5835,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i8( + undef, * %0, %1, i64 %2) @@ -5623,6 +5869,7 @@ } declare @llvm.riscv.vloxei.nxv16f16.nxv16i8( + , *, , i64); @@ -5636,6 +5883,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i8( + undef, * %0, %1, i64 %2) @@ -5669,6 +5917,7 @@ } declare @llvm.riscv.vloxei.nxv32f16.nxv32i8( + , *, , i64); @@ -5682,6 +5931,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i8( + undef, * %0, %1, i64 %2) @@ -5715,6 +5965,7 @@ } declare @llvm.riscv.vloxei.nxv1f32.nxv1i8( + , *, , i64); @@ -5728,6 +5979,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i8( + undef, * %0, %1, i64 %2) @@ -5761,6 +6013,7 @@ } declare @llvm.riscv.vloxei.nxv2f32.nxv2i8( + , *, , i64); @@ -5774,6 +6027,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i8( + undef, * %0, %1, i64 %2) @@ -5807,6 +6061,7 @@ } declare @llvm.riscv.vloxei.nxv4f32.nxv4i8( + , *, , i64); @@ -5820,6 +6075,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i8( + undef, * %0, %1, i64 %2) @@ -5853,6 +6109,7 @@ } declare @llvm.riscv.vloxei.nxv8f32.nxv8i8( + , *, , i64); @@ -5866,6 +6123,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i8( + undef, * %0, %1, i64 %2) @@ -5899,6 +6157,7 @@ } declare @llvm.riscv.vloxei.nxv16f32.nxv16i8( + , *, , i64); @@ -5912,6 +6171,7 @@ ; CHECK-NEXT: ret entry: %a = call 
<vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
+    <vscale x 16 x float> undef,
    <vscale x 16 x float>* %0,
    <vscale x 16 x i8> %1,
    i64 %2)
@@ -5945,6 +6205,7 @@
}
declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
+  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  <vscale x 1 x i8>,
  i64);
@@ -5958,6 +6219,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
+    <vscale x 1 x double> undef,
    <vscale x 1 x double>* %0,
    <vscale x 1 x i8> %1,
    i64 %2)
@@ -5991,6 +6253,7 @@
}
declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
+  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  <vscale x 2 x i8>,
  i64);
@@ -6004,6 +6267,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
+    <vscale x 2 x double> undef,
    <vscale x 2 x double>* %0,
    <vscale x 2 x i8> %1,
    i64 %2)
@@ -6037,6 +6301,7 @@
}
declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  <vscale x 4 x i8>,
  i64);
@@ -6050,6 +6315,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+    <vscale x 4 x double> undef,
    <vscale x 4 x double>* %0,
    <vscale x 4 x i8> %1,
    i64 %2)
@@ -6083,6 +6349,7 @@
}
declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  <vscale x 8 x i8>,
  i64);
@@ -6096,6 +6363,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+    <vscale x 8 x double> undef,
    <vscale x 8 x double>* %0,
    <vscale x 8 x i8> %1,
    i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
+  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  i32,
  i32);
@@ -14,6 +15,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
+    <vscale x 1 x i64> undef,
    <vscale x 1 x i64>* %0,
    i32 %1,
    i32 %2)
@@ -47,6 +49,7 @@
}
declare <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
+  <vscale x 2 x i64>,
  <vscale x 2 x i64>*,
  i32,
  i32);
@@ -59,6 +62,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
+    <vscale x 2 x i64> undef,
    <vscale x 2 x i64>* %0,
    i32 %1,
    i32 %2)
@@ -92,6 +96,7 @@
}
declare <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
+  <vscale x 4 x i64>,
  <vscale x 4 x i64>*,
  i32,
  i32);
@@ -104,6 +109,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
+    <vscale x 4 x i64> undef,
    <vscale x 4 x i64>* %0,
    i32 %1,
    i32 %2)
@@ -137,6 +143,7 @@
}
declare <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
+  <vscale x 8 x i64>,
  <vscale x 8 x i64>*,
  i32,
  i32);
@@ -149,6 +156,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
+    <vscale x 8 x i64> undef,
    <vscale x 8 x i64>* %0,
    i32 %1,
    i32 %2)
@@ -182,6 +190,7 @@
}
declare <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
+  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  i32,
  i32);
@@ -194,6 +203,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
+    <vscale x 1 x double> undef,
    <vscale x 1 x double>* %0,
    i32 %1,
    i32 %2)
@@ -227,6 +237,7 @@
}
declare <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
+  <vscale x 2 x double>,
  <vscale x 2 x double>*,
  i32,
  i32);
@@ -239,6 +250,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
+    <vscale x 2 x double> undef,
    <vscale x 2 x double>* %0,
    i32 %1,
    i32 %2)
@@ -272,6 +284,7 @@
}
declare <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
+  <vscale x 4 x double>,
  <vscale x 4 x double>*,
  i32,
  i32);
@@ -284,6 +297,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
+    <vscale x 4 x double> undef,
    <vscale x 4 x double>* %0,
    i32 %1,
    i32 %2)
@@ -317,6 +331,7 @@
}
declare <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
+  <vscale x 8 x double>,
  <vscale x 8 x double>*,
  i32,
  i32);
@@ -329,6 +344,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
+    <vscale x 8 x double> undef,
    <vscale x 8 x double>* %0,
    i32 %1,
    i32 %2)
@@ -362,6 +378,7 @@
}
declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
+  <vscale x 1 x i32>,
  <vscale x 1 x i32>*,
  i32,
  i32);
@@ -374,6 +391,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
+    <vscale x 1 x i32> undef,
    <vscale x 1 x i32>* %0,
    i32 %1,
    i32 %2)
@@ -407,6 +425,7 @@
}
declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
+  <vscale x 2 x i32>,
  <vscale x 2 x i32>*,
  i32,
  i32);
@@ -419,6 +438,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
+    <vscale x 2 x i32> undef,
    <vscale x 2 x i32>* %0,
    i32 %1,
    i32 %2)
@@ -452,6 +472,7 @@
}
declare <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
+  <vscale x 4 x i32>,
  <vscale x 4 x i32>*,
  i32,
  i32);
@@ -464,6 +485,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
+    <vscale x 4 x i32> undef,
    <vscale x 4 x i32>* %0,
    i32 %1,
    i32 %2)
@@ -497,6 +519,7 @@
}
declare <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
+  <vscale x 8 x i32>,
  <vscale x 8 x i32>*,
  i32,
  i32);
@@ -509,6 +532,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
+    <vscale x 8 x i32> undef,
    <vscale x 8 x i32>* %0,
    i32 %1,
    i32 %2)
@@ -542,6 +566,7 @@
}
declare <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
+  <vscale x 16 x i32>,
  <vscale x 16 x i32>*,
  i32,
  i32);
@@ -554,6 +579,7 @@
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
+    <vscale x 16 x i32> undef,
    <vscale x 16 x i32>* %0,
    i32 %1,
i32 %2) @@ -587,6 +613,7 @@ } declare @llvm.riscv.vlse.nxv1f32( + , *, i32, i32); @@ -599,6 +626,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1f32( + undef, * %0, i32 %1, i32 %2) @@ -632,6 +660,7 @@ } declare @llvm.riscv.vlse.nxv2f32( + , *, i32, i32); @@ -644,6 +673,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2f32( + undef, * %0, i32 %1, i32 %2) @@ -677,6 +707,7 @@ } declare @llvm.riscv.vlse.nxv4f32( + , *, i32, i32); @@ -689,6 +720,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4f32( + undef, * %0, i32 %1, i32 %2) @@ -722,6 +754,7 @@ } declare @llvm.riscv.vlse.nxv8f32( + , *, i32, i32); @@ -734,6 +767,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8f32( + undef, * %0, i32 %1, i32 %2) @@ -767,6 +801,7 @@ } declare @llvm.riscv.vlse.nxv16f32( + , *, i32, i32); @@ -779,6 +814,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv16f32( + undef, * %0, i32 %1, i32 %2) @@ -812,6 +848,7 @@ } declare @llvm.riscv.vlse.nxv1i16( + , *, i32, i32); @@ -824,6 +861,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1i16( + undef, * %0, i32 %1, i32 %2) @@ -857,6 +895,7 @@ } declare @llvm.riscv.vlse.nxv2i16( + , *, i32, i32); @@ -869,6 +908,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2i16( + undef, * %0, i32 %1, i32 %2) @@ -902,6 +942,7 @@ } declare @llvm.riscv.vlse.nxv4i16( + , *, i32, i32); @@ -914,6 +955,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4i16( + undef, * %0, i32 %1, i32 %2) @@ -947,6 +989,7 @@ } declare @llvm.riscv.vlse.nxv8i16( + , *, i32, i32); @@ -959,6 +1002,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8i16( + undef, * %0, i32 %1, i32 %2) @@ -992,6 +1036,7 @@ } declare @llvm.riscv.vlse.nxv16i16( + , *, i32, i32); @@ -1004,6 +1049,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv16i16( + undef, * %0, i32 %1, i32 %2) @@ -1037,6 +1083,7 @@ } declare @llvm.riscv.vlse.nxv32i16( + , *, i32, i32); @@ -1049,6 +1096,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv32i16( + undef, * %0, i32 %1, i32 %2) @@ -1082,6 +1130,7 @@ } declare @llvm.riscv.vlse.nxv1f16( + , *, i32, i32); @@ -1094,6 +1143,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1f16( + undef, * %0, i32 %1, i32 %2) @@ -1127,6 +1177,7 @@ } declare @llvm.riscv.vlse.nxv2f16( + , *, i32, i32); @@ -1139,6 +1190,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2f16( + undef, * %0, i32 %1, i32 %2) @@ -1172,6 +1224,7 @@ } declare @llvm.riscv.vlse.nxv4f16( + , *, i32, i32); @@ -1184,6 +1237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4f16( + undef, * %0, i32 %1, i32 %2) @@ -1217,6 +1271,7 @@ } declare @llvm.riscv.vlse.nxv8f16( + , *, i32, i32); @@ -1229,6 +1284,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8f16( + undef, * %0, i32 %1, i32 %2) @@ -1262,6 +1318,7 @@ } declare @llvm.riscv.vlse.nxv16f16( + , *, i32, i32); @@ -1274,6 +1331,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv16f16( + undef, * %0, i32 %1, i32 %2) @@ -1307,6 +1365,7 @@ } declare @llvm.riscv.vlse.nxv32f16( + , *, i32, i32); @@ -1319,6 +1378,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv32f16( + undef, * %0, i32 %1, i32 %2) @@ -1352,6 +1412,7 @@ } declare @llvm.riscv.vlse.nxv1i8( + , *, i32, i32); @@ -1364,6 +1425,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1i8( + undef, * %0, i32 %1, i32 %2) @@ -1397,6 +1459,7 @@ } declare @llvm.riscv.vlse.nxv2i8( + , *, i32, i32); @@ -1409,6 +1472,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vlse.nxv2i8( + undef, * %0, i32 %1, i32 %2) @@ -1442,6 +1506,7 @@ } declare @llvm.riscv.vlse.nxv4i8( + , *, i32, i32); @@ -1454,6 +1519,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4i8( + undef, * %0, i32 %1, i32 %2) @@ -1487,6 +1553,7 @@ } declare @llvm.riscv.vlse.nxv8i8( + , *, i32, i32); @@ -1499,6 +1566,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8i8( + undef, * %0, i32 %1, i32 %2) @@ -1532,6 +1600,7 @@ } declare @llvm.riscv.vlse.nxv16i8( + , *, i32, i32); @@ -1544,6 +1613,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv16i8( + undef, * %0, i32 %1, i32 %2) @@ -1577,6 +1647,7 @@ } declare @llvm.riscv.vlse.nxv32i8( + , *, i32, i32); @@ -1589,6 +1660,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv32i8( + undef, * %0, i32 %1, i32 %2) @@ -1622,6 +1694,7 @@ } declare @llvm.riscv.vlse.nxv64i8( + , *, i32, i32); @@ -1634,6 +1707,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv64i8( + undef, * %0, i32 %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vlse.nxv1i64( + , *, i64, i64); @@ -14,6 +15,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1i64( + undef, * %0, i64 %1, i64 %2) @@ -47,6 +49,7 @@ } declare @llvm.riscv.vlse.nxv2i64( + , *, i64, i64); @@ -59,6 +62,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2i64( + undef, * %0, i64 %1, i64 %2) @@ -92,6 +96,7 @@ } declare @llvm.riscv.vlse.nxv4i64( + , *, i64, i64); @@ -104,6 +109,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4i64( + undef, * %0, i64 %1, i64 %2) @@ -137,6 +143,7 @@ } declare @llvm.riscv.vlse.nxv8i64( + , *, i64, i64); @@ -149,6 +156,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8i64( + undef, * %0, i64 %1, i64 %2) @@ -182,6 +190,7 @@ } declare @llvm.riscv.vlse.nxv1f64( + , *, i64, i64); @@ -194,6 +203,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1f64( + undef, * %0, i64 %1, i64 %2) @@ -227,6 +237,7 @@ } declare @llvm.riscv.vlse.nxv2f64( + , *, i64, i64); @@ -239,6 +250,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2f64( + undef, * %0, i64 %1, i64 %2) @@ -272,6 +284,7 @@ } declare @llvm.riscv.vlse.nxv4f64( + , *, i64, i64); @@ -284,6 +297,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4f64( + undef, * %0, i64 %1, i64 %2) @@ -317,6 +331,7 @@ } declare @llvm.riscv.vlse.nxv8f64( + , *, i64, i64); @@ -329,6 +344,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8f64( + undef, * %0, i64 %1, i64 %2) @@ -362,6 +378,7 @@ } declare @llvm.riscv.vlse.nxv1i32( + , *, i64, i64); @@ -374,6 +391,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1i32( + undef, * %0, i64 %1, i64 %2) @@ -407,6 +425,7 @@ } declare @llvm.riscv.vlse.nxv2i32( + , *, i64, i64); @@ -419,6 +438,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2i32( + undef, * %0, i64 %1, i64 %2) @@ -452,6 +472,7 @@ } declare @llvm.riscv.vlse.nxv4i32( + , *, i64, i64); @@ -464,6 +485,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4i32( + undef, * %0, i64 %1, i64 %2) @@ -497,6 +519,7 @@ } declare @llvm.riscv.vlse.nxv8i32( + , *, i64, i64); @@ -509,6 +532,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8i32( + undef, * %0, i64 %1, i64 %2) @@ -542,6 +566,7 @@ } declare 
@llvm.riscv.vlse.nxv16i32( + , *, i64, i64); @@ -554,6 +579,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv16i32( + undef, * %0, i64 %1, i64 %2) @@ -587,6 +613,7 @@ } declare @llvm.riscv.vlse.nxv1f32( + , *, i64, i64); @@ -599,6 +626,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1f32( + undef, * %0, i64 %1, i64 %2) @@ -632,6 +660,7 @@ } declare @llvm.riscv.vlse.nxv2f32( + , *, i64, i64); @@ -644,6 +673,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2f32( + undef, * %0, i64 %1, i64 %2) @@ -677,6 +707,7 @@ } declare @llvm.riscv.vlse.nxv4f32( + , *, i64, i64); @@ -689,6 +720,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4f32( + undef, * %0, i64 %1, i64 %2) @@ -722,6 +754,7 @@ } declare @llvm.riscv.vlse.nxv8f32( + , *, i64, i64); @@ -734,6 +767,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8f32( + undef, * %0, i64 %1, i64 %2) @@ -767,6 +801,7 @@ } declare @llvm.riscv.vlse.nxv16f32( + , *, i64, i64); @@ -779,6 +814,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv16f32( + undef, * %0, i64 %1, i64 %2) @@ -812,6 +848,7 @@ } declare @llvm.riscv.vlse.nxv1i16( + , *, i64, i64); @@ -824,6 +861,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1i16( + undef, * %0, i64 %1, i64 %2) @@ -857,6 +895,7 @@ } declare @llvm.riscv.vlse.nxv2i16( + , *, i64, i64); @@ -869,6 +908,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2i16( + undef, * %0, i64 %1, i64 %2) @@ -902,6 +942,7 @@ } declare @llvm.riscv.vlse.nxv4i16( + , *, i64, i64); @@ -914,6 +955,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4i16( + undef, * %0, i64 %1, i64 %2) @@ -947,6 +989,7 @@ } declare @llvm.riscv.vlse.nxv8i16( + , *, i64, i64); @@ -959,6 +1002,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8i16( + undef, * %0, i64 %1, i64 %2) @@ -992,6 +1036,7 @@ } declare @llvm.riscv.vlse.nxv16i16( + , *, i64, i64); @@ -1004,6 +1049,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv16i16( + undef, * %0, i64 %1, i64 %2) @@ -1037,6 +1083,7 @@ } declare @llvm.riscv.vlse.nxv32i16( + , *, i64, i64); @@ -1049,6 +1096,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv32i16( + undef, * %0, i64 %1, i64 %2) @@ -1082,6 +1130,7 @@ } declare @llvm.riscv.vlse.nxv1f16( + , *, i64, i64); @@ -1094,6 +1143,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1f16( + undef, * %0, i64 %1, i64 %2) @@ -1127,6 +1177,7 @@ } declare @llvm.riscv.vlse.nxv2f16( + , *, i64, i64); @@ -1139,6 +1190,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2f16( + undef, * %0, i64 %1, i64 %2) @@ -1172,6 +1224,7 @@ } declare @llvm.riscv.vlse.nxv4f16( + , *, i64, i64); @@ -1184,6 +1237,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4f16( + undef, * %0, i64 %1, i64 %2) @@ -1217,6 +1271,7 @@ } declare @llvm.riscv.vlse.nxv8f16( + , *, i64, i64); @@ -1229,6 +1284,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8f16( + undef, * %0, i64 %1, i64 %2) @@ -1262,6 +1318,7 @@ } declare @llvm.riscv.vlse.nxv16f16( + , *, i64, i64); @@ -1274,6 +1331,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv16f16( + undef, * %0, i64 %1, i64 %2) @@ -1307,6 +1365,7 @@ } declare @llvm.riscv.vlse.nxv32f16( + , *, i64, i64); @@ -1319,6 +1378,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv32f16( + undef, * %0, i64 %1, i64 %2) @@ -1352,6 +1412,7 @@ } declare @llvm.riscv.vlse.nxv1i8( + , *, i64, i64); @@ -1364,6 +1425,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv1i8( + undef, * %0, i64 %1, i64 %2) @@ 
-1397,6 +1459,7 @@ } declare @llvm.riscv.vlse.nxv2i8( + , *, i64, i64); @@ -1409,6 +1472,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv2i8( + undef, * %0, i64 %1, i64 %2) @@ -1442,6 +1506,7 @@ } declare @llvm.riscv.vlse.nxv4i8( + , *, i64, i64); @@ -1454,6 +1519,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv4i8( + undef, * %0, i64 %1, i64 %2) @@ -1487,6 +1553,7 @@ } declare @llvm.riscv.vlse.nxv8i8( + , *, i64, i64); @@ -1499,6 +1566,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv8i8( + undef, * %0, i64 %1, i64 %2) @@ -1532,6 +1600,7 @@ } declare @llvm.riscv.vlse.nxv16i8( + , *, i64, i64); @@ -1544,6 +1613,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv16i8( + undef, * %0, i64 %1, i64 %2) @@ -1577,6 +1647,7 @@ } declare @llvm.riscv.vlse.nxv32i8( + , *, i64, i64); @@ -1589,6 +1660,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv32i8( + undef, * %0, i64 %1, i64 %2) @@ -1622,6 +1694,7 @@ } declare @llvm.riscv.vlse.nxv64i8( + , *, i64, i64); @@ -1634,6 +1707,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vlse.nxv64i8( + undef, * %0, i64 %1, i64 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll @@ -3,6 +3,7 @@ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vluxei.nxv1i8.nxv1i32( + , *, , i32); @@ -16,6 +17,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i32( + undef, * %0, %1, i32 %2) @@ -49,6 +51,7 @@ } declare @llvm.riscv.vluxei.nxv2i8.nxv2i32( + , *, , i32); @@ -62,6 +65,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i32( + undef, * %0, %1, i32 %2) @@ -95,6 +99,7 @@ } declare @llvm.riscv.vluxei.nxv4i8.nxv4i32( + , *, , i32); @@ -108,6 +113,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i32( + undef, * %0, %1, i32 %2) @@ -141,6 +147,7 @@ } declare @llvm.riscv.vluxei.nxv8i8.nxv8i32( + , *, , i32); @@ -154,6 +161,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i32( + undef, * %0, %1, i32 %2) @@ -187,6 +195,7 @@ } declare @llvm.riscv.vluxei.nxv16i8.nxv16i32( + , *, , i32); @@ -200,6 +209,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i32( + undef, * %0, %1, i32 %2) @@ -233,6 +243,7 @@ } declare @llvm.riscv.vluxei.nxv1i16.nxv1i32( + , *, , i32); @@ -246,6 +257,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i32( + undef, * %0, %1, i32 %2) @@ -279,6 +291,7 @@ } declare @llvm.riscv.vluxei.nxv2i16.nxv2i32( + , *, , i32); @@ -292,6 +305,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i32( + undef, * %0, %1, i32 %2) @@ -325,6 +339,7 @@ } declare @llvm.riscv.vluxei.nxv4i16.nxv4i32( + , *, , i32); @@ -338,6 +353,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i32( + undef, * %0, %1, i32 %2) @@ -371,6 +387,7 @@ } declare @llvm.riscv.vluxei.nxv8i16.nxv8i32( + , *, , i32); @@ -384,6 +401,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i32( + undef, * %0, %1, i32 %2) @@ -417,6 +435,7 @@ } declare @llvm.riscv.vluxei.nxv16i16.nxv16i32( + , *, , i32); @@ -430,6 +449,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i32( + undef, * %0, %1, i32 %2) @@ -463,6 +483,7 @@ } declare @llvm.riscv.vluxei.nxv1i32.nxv1i32( + , *, , i32); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i32( + undef, * %0, %1, i32 %2) @@ -508,6 +530,7 @@ } 
declare @llvm.riscv.vluxei.nxv2i32.nxv2i32( + , *, , i32); @@ -520,6 +543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i32( + undef, * %0, %1, i32 %2) @@ -553,6 +577,7 @@ } declare @llvm.riscv.vluxei.nxv4i32.nxv4i32( + , *, , i32); @@ -565,6 +590,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i32( + undef, * %0, %1, i32 %2) @@ -598,6 +624,7 @@ } declare @llvm.riscv.vluxei.nxv8i32.nxv8i32( + , *, , i32); @@ -610,6 +637,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i32( + undef, * %0, %1, i32 %2) @@ -643,6 +671,7 @@ } declare @llvm.riscv.vluxei.nxv16i32.nxv16i32( + , *, , i32); @@ -655,6 +684,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i32( + undef, * %0, %1, i32 %2) @@ -688,6 +718,7 @@ } declare @llvm.riscv.vluxei.nxv1i64.nxv1i32( + , *, , i32); @@ -701,6 +732,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i32( + undef, * %0, %1, i32 %2) @@ -734,6 +766,7 @@ } declare @llvm.riscv.vluxei.nxv2i64.nxv2i32( + , *, , i32); @@ -747,6 +780,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i32( + undef, * %0, %1, i32 %2) @@ -780,6 +814,7 @@ } declare @llvm.riscv.vluxei.nxv4i64.nxv4i32( + , *, , i32); @@ -793,6 +828,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i32( + undef, * %0, %1, i32 %2) @@ -826,6 +862,7 @@ } declare @llvm.riscv.vluxei.nxv8i64.nxv8i32( + , *, , i32); @@ -839,6 +876,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i32( + undef, * %0, %1, i32 %2) @@ -872,6 +910,7 @@ } declare @llvm.riscv.vluxei.nxv1f16.nxv1i32( + , *, , i32); @@ -885,6 +924,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i32( + undef, * %0, %1, i32 %2) @@ -918,6 +958,7 @@ } declare @llvm.riscv.vluxei.nxv2f16.nxv2i32( + , *, , i32); @@ -931,6 +972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i32( + undef, * %0, %1, i32 %2) @@ -964,6 +1006,7 @@ } declare @llvm.riscv.vluxei.nxv4f16.nxv4i32( + , *, , i32); @@ -977,6 +1020,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i32( + undef, * %0, %1, i32 %2) @@ -1010,6 +1054,7 @@ } declare @llvm.riscv.vluxei.nxv8f16.nxv8i32( + , *, , i32); @@ -1023,6 +1068,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i32( + undef, * %0, %1, i32 %2) @@ -1056,6 +1102,7 @@ } declare @llvm.riscv.vluxei.nxv16f16.nxv16i32( + , *, , i32); @@ -1069,6 +1116,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i32( + undef, * %0, %1, i32 %2) @@ -1102,6 +1150,7 @@ } declare @llvm.riscv.vluxei.nxv1f32.nxv1i32( + , *, , i32); @@ -1114,6 +1163,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i32( + undef, * %0, %1, i32 %2) @@ -1147,6 +1197,7 @@ } declare @llvm.riscv.vluxei.nxv2f32.nxv2i32( + , *, , i32); @@ -1159,6 +1210,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i32( + undef, * %0, %1, i32 %2) @@ -1192,6 +1244,7 @@ } declare @llvm.riscv.vluxei.nxv4f32.nxv4i32( + , *, , i32); @@ -1204,6 +1257,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i32( + undef, * %0, %1, i32 %2) @@ -1237,6 +1291,7 @@ } declare @llvm.riscv.vluxei.nxv8f32.nxv8i32( + , *, , i32); @@ -1249,6 +1304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i32( + undef, * %0, %1, i32 %2) @@ -1282,6 +1338,7 @@ } declare @llvm.riscv.vluxei.nxv16f32.nxv16i32( + , *, , i32); @@ -1294,6 +1351,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vluxei.nxv16f32.nxv16i32( + undef, * %0, %1, i32 %2) @@ -1327,6 +1385,7 @@ } declare @llvm.riscv.vluxei.nxv1f64.nxv1i32( + , *, , i32); @@ -1340,6 +1399,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i32( + undef, * %0, %1, i32 %2) @@ -1373,6 +1433,7 @@ } declare @llvm.riscv.vluxei.nxv2f64.nxv2i32( + , *, , i32); @@ -1386,6 +1447,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i32( + undef, * %0, %1, i32 %2) @@ -1419,6 +1481,7 @@ } declare @llvm.riscv.vluxei.nxv4f64.nxv4i32( + , *, , i32); @@ -1432,6 +1495,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i32( + undef, * %0, %1, i32 %2) @@ -1465,6 +1529,7 @@ } declare @llvm.riscv.vluxei.nxv8f64.nxv8i32( + , *, , i32); @@ -1478,6 +1543,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i32( + undef, * %0, %1, i32 %2) @@ -1511,6 +1577,7 @@ } declare @llvm.riscv.vluxei.nxv1i8.nxv1i16( + , *, , i32); @@ -1524,6 +1591,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i16( + undef, * %0, %1, i32 %2) @@ -1557,6 +1625,7 @@ } declare @llvm.riscv.vluxei.nxv2i8.nxv2i16( + , *, , i32); @@ -1570,6 +1639,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i16( + undef, * %0, %1, i32 %2) @@ -1603,6 +1673,7 @@ } declare @llvm.riscv.vluxei.nxv4i8.nxv4i16( + , *, , i32); @@ -1616,6 +1687,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i16( + undef, * %0, %1, i32 %2) @@ -1649,6 +1721,7 @@ } declare @llvm.riscv.vluxei.nxv8i8.nxv8i16( + , *, , i32); @@ -1662,6 +1735,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i16( + undef, * %0, %1, i32 %2) @@ -1695,6 +1769,7 @@ } declare @llvm.riscv.vluxei.nxv16i8.nxv16i16( + , *, , i32); @@ -1708,6 +1783,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i16( + undef, * %0, %1, i32 %2) @@ -1741,6 +1817,7 @@ } declare @llvm.riscv.vluxei.nxv32i8.nxv32i16( + , *, , i32); @@ -1754,6 +1831,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i16( + undef, * %0, %1, i32 %2) @@ -1787,6 +1865,7 @@ } declare @llvm.riscv.vluxei.nxv1i16.nxv1i16( + , *, , i32); @@ -1799,6 +1878,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i16( + undef, * %0, %1, i32 %2) @@ -1832,6 +1912,7 @@ } declare @llvm.riscv.vluxei.nxv2i16.nxv2i16( + , *, , i32); @@ -1844,6 +1925,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i16( + undef, * %0, %1, i32 %2) @@ -1877,6 +1959,7 @@ } declare @llvm.riscv.vluxei.nxv4i16.nxv4i16( + , *, , i32); @@ -1889,6 +1972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i16( + undef, * %0, %1, i32 %2) @@ -1922,6 +2006,7 @@ } declare @llvm.riscv.vluxei.nxv8i16.nxv8i16( + , *, , i32); @@ -1934,6 +2019,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i16( + undef, * %0, %1, i32 %2) @@ -1967,6 +2053,7 @@ } declare @llvm.riscv.vluxei.nxv16i16.nxv16i16( + , *, , i32); @@ -1979,6 +2066,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i16( + undef, * %0, %1, i32 %2) @@ -2012,6 +2100,7 @@ } declare @llvm.riscv.vluxei.nxv32i16.nxv32i16( + , *, , i32); @@ -2024,6 +2113,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i16( + undef, * %0, %1, i32 %2) @@ -2057,6 +2147,7 @@ } declare @llvm.riscv.vluxei.nxv1i32.nxv1i16( + , *, , i32); @@ -2070,6 +2161,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i16( + undef, * %0, %1, i32 %2) @@ -2103,6 +2195,7 @@ } declare 
@llvm.riscv.vluxei.nxv2i32.nxv2i16( + , *, , i32); @@ -2116,6 +2209,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i16( + undef, * %0, %1, i32 %2) @@ -2149,6 +2243,7 @@ } declare @llvm.riscv.vluxei.nxv4i32.nxv4i16( + , *, , i32); @@ -2162,6 +2257,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i16( + undef, * %0, %1, i32 %2) @@ -2195,6 +2291,7 @@ } declare @llvm.riscv.vluxei.nxv8i32.nxv8i16( + , *, , i32); @@ -2208,6 +2305,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i16( + undef, * %0, %1, i32 %2) @@ -2241,6 +2339,7 @@ } declare @llvm.riscv.vluxei.nxv16i32.nxv16i16( + , *, , i32); @@ -2254,6 +2353,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i16( + undef, * %0, %1, i32 %2) @@ -2287,6 +2387,7 @@ } declare @llvm.riscv.vluxei.nxv1i64.nxv1i16( + , *, , i32); @@ -2300,6 +2401,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i16( + undef, * %0, %1, i32 %2) @@ -2333,6 +2435,7 @@ } declare @llvm.riscv.vluxei.nxv2i64.nxv2i16( + , *, , i32); @@ -2346,6 +2449,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i16( + undef, * %0, %1, i32 %2) @@ -2379,6 +2483,7 @@ } declare @llvm.riscv.vluxei.nxv4i64.nxv4i16( + , *, , i32); @@ -2392,6 +2497,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i16( + undef, * %0, %1, i32 %2) @@ -2425,6 +2531,7 @@ } declare @llvm.riscv.vluxei.nxv8i64.nxv8i16( + , *, , i32); @@ -2438,6 +2545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i16( + undef, * %0, %1, i32 %2) @@ -2471,6 +2579,7 @@ } declare @llvm.riscv.vluxei.nxv1f16.nxv1i16( + , *, , i32); @@ -2483,6 +2592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i16( + undef, * %0, %1, i32 %2) @@ -2516,6 +2626,7 @@ } declare @llvm.riscv.vluxei.nxv2f16.nxv2i16( + , *, , i32); @@ -2528,6 +2639,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i16( + undef, * %0, %1, i32 %2) @@ -2561,6 +2673,7 @@ } declare @llvm.riscv.vluxei.nxv4f16.nxv4i16( + , *, , i32); @@ -2573,6 +2686,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i16( + undef, * %0, %1, i32 %2) @@ -2606,6 +2720,7 @@ } declare @llvm.riscv.vluxei.nxv8f16.nxv8i16( + , *, , i32); @@ -2618,6 +2733,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i16( + undef, * %0, %1, i32 %2) @@ -2651,6 +2767,7 @@ } declare @llvm.riscv.vluxei.nxv16f16.nxv16i16( + , *, , i32); @@ -2663,6 +2780,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i16( + undef, * %0, %1, i32 %2) @@ -2696,6 +2814,7 @@ } declare @llvm.riscv.vluxei.nxv32f16.nxv32i16( + , *, , i32); @@ -2708,6 +2827,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i16( + undef, * %0, %1, i32 %2) @@ -2741,6 +2861,7 @@ } declare @llvm.riscv.vluxei.nxv1f32.nxv1i16( + , *, , i32); @@ -2754,6 +2875,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i16( + undef, * %0, %1, i32 %2) @@ -2787,6 +2909,7 @@ } declare @llvm.riscv.vluxei.nxv2f32.nxv2i16( + , *, , i32); @@ -2800,6 +2923,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i16( + undef, * %0, %1, i32 %2) @@ -2833,6 +2957,7 @@ } declare @llvm.riscv.vluxei.nxv4f32.nxv4i16( + , *, , i32); @@ -2846,6 +2971,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i16( + undef, * %0, %1, i32 %2) @@ -2879,6 +3005,7 @@ } declare @llvm.riscv.vluxei.nxv8f32.nxv8i16( + , *, , i32); @@ -2892,6 +3019,7 @@ ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i16( + undef, * %0, %1, i32 %2) @@ -2925,6 +3053,7 @@ } declare @llvm.riscv.vluxei.nxv16f32.nxv16i16( + , *, , i32); @@ -2938,6 +3067,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i16( + undef, * %0, %1, i32 %2) @@ -2971,6 +3101,7 @@ } declare @llvm.riscv.vluxei.nxv1f64.nxv1i16( + , *, , i32); @@ -2984,6 +3115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i16( + undef, * %0, %1, i32 %2) @@ -3017,6 +3149,7 @@ } declare @llvm.riscv.vluxei.nxv2f64.nxv2i16( + , *, , i32); @@ -3030,6 +3163,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i16( + undef, * %0, %1, i32 %2) @@ -3063,6 +3197,7 @@ } declare @llvm.riscv.vluxei.nxv4f64.nxv4i16( + , *, , i32); @@ -3076,6 +3211,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i16( + undef, * %0, %1, i32 %2) @@ -3109,6 +3245,7 @@ } declare @llvm.riscv.vluxei.nxv8f64.nxv8i16( + , *, , i32); @@ -3122,6 +3259,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i16( + undef, * %0, %1, i32 %2) @@ -3155,6 +3293,7 @@ } declare @llvm.riscv.vluxei.nxv1i8.nxv1i8( + , *, , i32); @@ -3167,6 +3306,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i8( + undef, * %0, %1, i32 %2) @@ -3200,6 +3340,7 @@ } declare @llvm.riscv.vluxei.nxv2i8.nxv2i8( + , *, , i32); @@ -3212,6 +3353,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i8( + undef, * %0, %1, i32 %2) @@ -3245,6 +3387,7 @@ } declare @llvm.riscv.vluxei.nxv4i8.nxv4i8( + , *, , i32); @@ -3257,6 +3400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i8( + undef, * %0, %1, i32 %2) @@ -3290,6 +3434,7 @@ } declare @llvm.riscv.vluxei.nxv8i8.nxv8i8( + , *, , i32); @@ -3302,6 +3447,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i8( + undef, * %0, %1, i32 %2) @@ -3335,6 +3481,7 @@ } declare @llvm.riscv.vluxei.nxv16i8.nxv16i8( + , *, , i32); @@ -3347,6 +3494,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i8( + undef, * %0, %1, i32 %2) @@ -3380,6 +3528,7 @@ } declare @llvm.riscv.vluxei.nxv32i8.nxv32i8( + , *, , i32); @@ -3392,6 +3541,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i8( + undef, * %0, %1, i32 %2) @@ -3425,6 +3575,7 @@ } declare @llvm.riscv.vluxei.nxv64i8.nxv64i8( + , *, , i32); @@ -3437,6 +3588,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv64i8.nxv64i8( + undef, * %0, %1, i32 %2) @@ -3470,6 +3622,7 @@ } declare @llvm.riscv.vluxei.nxv1i16.nxv1i8( + , *, , i32); @@ -3483,6 +3636,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i8( + undef, * %0, %1, i32 %2) @@ -3516,6 +3670,7 @@ } declare @llvm.riscv.vluxei.nxv2i16.nxv2i8( + , *, , i32); @@ -3529,6 +3684,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i8( + undef, * %0, %1, i32 %2) @@ -3562,6 +3718,7 @@ } declare @llvm.riscv.vluxei.nxv4i16.nxv4i8( + , *, , i32); @@ -3575,6 +3732,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i8( + undef, * %0, %1, i32 %2) @@ -3608,6 +3766,7 @@ } declare @llvm.riscv.vluxei.nxv8i16.nxv8i8( + , *, , i32); @@ -3621,6 +3780,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i8( + undef, * %0, %1, i32 %2) @@ -3654,6 +3814,7 @@ } declare @llvm.riscv.vluxei.nxv16i16.nxv16i8( + , *, , i32); @@ -3667,6 +3828,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i8( + undef, * %0, %1, i32 %2) @@ -3700,6 +3862,7 @@ } declare 
@llvm.riscv.vluxei.nxv32i16.nxv32i8( + , *, , i32); @@ -3713,6 +3876,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i8( + undef, * %0, %1, i32 %2) @@ -3746,6 +3910,7 @@ } declare @llvm.riscv.vluxei.nxv1i32.nxv1i8( + , *, , i32); @@ -3759,6 +3924,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i8( + undef, * %0, %1, i32 %2) @@ -3792,6 +3958,7 @@ } declare @llvm.riscv.vluxei.nxv2i32.nxv2i8( + , *, , i32); @@ -3805,6 +3972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i8( + undef, * %0, %1, i32 %2) @@ -3838,6 +4006,7 @@ } declare @llvm.riscv.vluxei.nxv4i32.nxv4i8( + , *, , i32); @@ -3851,6 +4020,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i8( + undef, * %0, %1, i32 %2) @@ -3884,6 +4054,7 @@ } declare @llvm.riscv.vluxei.nxv8i32.nxv8i8( + , *, , i32); @@ -3897,6 +4068,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i8( + undef, * %0, %1, i32 %2) @@ -3930,6 +4102,7 @@ } declare @llvm.riscv.vluxei.nxv16i32.nxv16i8( + , *, , i32); @@ -3943,6 +4116,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i8( + undef, * %0, %1, i32 %2) @@ -3976,6 +4150,7 @@ } declare @llvm.riscv.vluxei.nxv1i64.nxv1i8( + , *, , i32); @@ -3989,6 +4164,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i8( + undef, * %0, %1, i32 %2) @@ -4022,6 +4198,7 @@ } declare @llvm.riscv.vluxei.nxv2i64.nxv2i8( + , *, , i32); @@ -4035,6 +4212,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i8( + undef, * %0, %1, i32 %2) @@ -4068,6 +4246,7 @@ } declare @llvm.riscv.vluxei.nxv4i64.nxv4i8( + , *, , i32); @@ -4081,6 +4260,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i8( + undef, * %0, %1, i32 %2) @@ -4114,6 +4294,7 @@ } declare @llvm.riscv.vluxei.nxv8i64.nxv8i8( + , *, , i32); @@ -4127,6 +4308,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i8( + undef, * %0, %1, i32 %2) @@ -4160,6 +4342,7 @@ } declare @llvm.riscv.vluxei.nxv1f16.nxv1i8( + , *, , i32); @@ -4173,6 +4356,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i8( + undef, * %0, %1, i32 %2) @@ -4206,6 +4390,7 @@ } declare @llvm.riscv.vluxei.nxv2f16.nxv2i8( + , *, , i32); @@ -4219,6 +4404,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i8( + undef, * %0, %1, i32 %2) @@ -4252,6 +4438,7 @@ } declare @llvm.riscv.vluxei.nxv4f16.nxv4i8( + , *, , i32); @@ -4265,6 +4452,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i8( + undef, * %0, %1, i32 %2) @@ -4298,6 +4486,7 @@ } declare @llvm.riscv.vluxei.nxv8f16.nxv8i8( + , *, , i32); @@ -4311,6 +4500,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i8( + undef, * %0, %1, i32 %2) @@ -4344,6 +4534,7 @@ } declare @llvm.riscv.vluxei.nxv16f16.nxv16i8( + , *, , i32); @@ -4357,6 +4548,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i8( + undef, * %0, %1, i32 %2) @@ -4390,6 +4582,7 @@ } declare @llvm.riscv.vluxei.nxv32f16.nxv32i8( + , *, , i32); @@ -4403,6 +4596,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i8( + undef, * %0, %1, i32 %2) @@ -4436,6 +4630,7 @@ } declare @llvm.riscv.vluxei.nxv1f32.nxv1i8( + , *, , i32); @@ -4449,6 +4644,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i8( + undef, * %0, %1, i32 %2) @@ -4482,6 +4678,7 @@ } declare @llvm.riscv.vluxei.nxv2f32.nxv2i8( + , *, , i32); @@ -4495,6 +4692,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vluxei.nxv2f32.nxv2i8( + undef, * %0, %1, i32 %2) @@ -4528,6 +4726,7 @@ } declare @llvm.riscv.vluxei.nxv4f32.nxv4i8( + , *, , i32); @@ -4541,6 +4740,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i8( + undef, * %0, %1, i32 %2) @@ -4574,6 +4774,7 @@ } declare @llvm.riscv.vluxei.nxv8f32.nxv8i8( + , *, , i32); @@ -4587,6 +4788,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i8( + undef, * %0, %1, i32 %2) @@ -4620,6 +4822,7 @@ } declare @llvm.riscv.vluxei.nxv16f32.nxv16i8( + , *, , i32); @@ -4633,6 +4836,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i8( + undef, * %0, %1, i32 %2) @@ -4666,6 +4870,7 @@ } declare @llvm.riscv.vluxei.nxv1f64.nxv1i8( + , *, , i32); @@ -4679,6 +4884,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i8( + undef, * %0, %1, i32 %2) @@ -4712,6 +4918,7 @@ } declare @llvm.riscv.vluxei.nxv2f64.nxv2i8( + , *, , i32); @@ -4725,6 +4932,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i8( + undef, * %0, %1, i32 %2) @@ -4758,6 +4966,7 @@ } declare @llvm.riscv.vluxei.nxv4f64.nxv4i8( + , *, , i32); @@ -4771,6 +4980,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i8( + undef, * %0, %1, i32 %2) @@ -4804,6 +5014,7 @@ } declare @llvm.riscv.vluxei.nxv8f64.nxv8i8( + , *, , i32); @@ -4817,6 +5028,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i8( + undef, * %0, %1, i32 %2) diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll @@ -2,6 +2,7 @@ ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+f,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s declare @llvm.riscv.vluxei.nxv1i8.nxv1i64( + , *, , i64); @@ -15,6 +16,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i64( + undef, * %0, %1, i64 %2) @@ -48,6 +50,7 @@ } declare @llvm.riscv.vluxei.nxv2i8.nxv2i64( + , *, , i64); @@ -61,6 +64,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i64( + undef, * %0, %1, i64 %2) @@ -94,6 +98,7 @@ } declare @llvm.riscv.vluxei.nxv4i8.nxv4i64( + , *, , i64); @@ -107,6 +112,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i64( + undef, * %0, %1, i64 %2) @@ -140,6 +146,7 @@ } declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( + , *, , i64); @@ -153,6 +160,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i64( + undef, * %0, %1, i64 %2) @@ -186,6 +194,7 @@ } declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( + , *, , i64); @@ -199,6 +208,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i64( + undef, * %0, %1, i64 %2) @@ -232,6 +242,7 @@ } declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( + , *, , i64); @@ -245,6 +256,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i64( + undef, * %0, %1, i64 %2) @@ -278,6 +290,7 @@ } declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( + , *, , i64); @@ -291,6 +304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( + undef, * %0, %1, i64 %2) @@ -324,6 +338,7 @@ } declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( + , *, , i64); @@ -337,6 +352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( + undef, * %0, %1, i64 %2) @@ -370,6 +386,7 @@ } declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( + , *, , i64); @@ -383,6 +400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i64( + undef, * %0, %1, i64 %2) @@ -416,6 
+434,7 @@ } declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( + , *, , i64); @@ -429,6 +448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( + undef, * %0, %1, i64 %2) @@ -462,6 +482,7 @@ } declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( + , *, , i64); @@ -475,6 +496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( + undef, * %0, %1, i64 %2) @@ -508,6 +530,7 @@ } declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( + , *, , i64); @@ -521,6 +544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( + undef, * %0, %1, i64 %2) @@ -554,6 +578,7 @@ } declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( + , *, , i64); @@ -566,6 +591,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i64( + undef, * %0, %1, i64 %2) @@ -599,6 +625,7 @@ } declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( + , *, , i64); @@ -611,6 +638,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i64( + undef, * %0, %1, i64 %2) @@ -644,6 +672,7 @@ } declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( + , *, , i64); @@ -656,6 +685,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i64( + undef, * %0, %1, i64 %2) @@ -689,6 +719,7 @@ } declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( + , *, , i64); @@ -701,6 +732,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i64( + undef, * %0, %1, i64 %2) @@ -734,6 +766,7 @@ } declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( + , *, , i64); @@ -747,6 +780,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i64( + undef, * %0, %1, i64 %2) @@ -780,6 +814,7 @@ } declare @llvm.riscv.vluxei.nxv2f16.nxv2i64( + , *, , i64); @@ -793,6 +828,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i64( + undef, * %0, %1, i64 %2) @@ -826,6 +862,7 @@ } declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( + , *, , i64); @@ -839,6 +876,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( + undef, * %0, %1, i64 %2) @@ -872,6 +910,7 @@ } declare @llvm.riscv.vluxei.nxv8f16.nxv8i64( + , *, , i64); @@ -885,6 +924,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( + undef, * %0, %1, i64 %2) @@ -918,6 +958,7 @@ } declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( + , *, , i64); @@ -931,6 +972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i64( + undef, * %0, %1, i64 %2) @@ -964,6 +1006,7 @@ } declare @llvm.riscv.vluxei.nxv2f32.nxv2i64( + , *, , i64); @@ -977,6 +1020,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( + undef, * %0, %1, i64 %2) @@ -1010,6 +1054,7 @@ } declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( + , *, , i64); @@ -1023,6 +1068,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( + undef, * %0, %1, i64 %2) @@ -1056,6 +1102,7 @@ } declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( + , *, , i64); @@ -1069,6 +1116,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( + undef, * %0, %1, i64 %2) @@ -1102,6 +1150,7 @@ } declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( + , *, , i64); @@ -1114,6 +1163,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i64( + undef, * %0, %1, i64 %2) @@ -1147,6 +1197,7 @@ } declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( + , *, , i64); @@ -1159,6 +1210,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i64( + undef, * %0, %1, i64 %2) @@ -1192,6 +1244,7 @@ } declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( + , *, , i64); @@ -1204,6 +1257,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vluxei.nxv4f64.nxv4i64( + undef, * %0, %1, i64 %2) @@ -1237,6 +1291,7 @@ } declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( + , *, , i64); @@ -1249,6 +1304,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i64( + undef, * %0, %1, i64 %2) @@ -1282,6 +1338,7 @@ } declare @llvm.riscv.vluxei.nxv1i8.nxv1i32( + , *, , i64); @@ -1295,6 +1352,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i32( + undef, * %0, %1, i64 %2) @@ -1328,6 +1386,7 @@ } declare @llvm.riscv.vluxei.nxv2i8.nxv2i32( + , *, , i64); @@ -1341,6 +1400,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i32( + undef, * %0, %1, i64 %2) @@ -1374,6 +1434,7 @@ } declare @llvm.riscv.vluxei.nxv4i8.nxv4i32( + , *, , i64); @@ -1387,6 +1448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i32( + undef, * %0, %1, i64 %2) @@ -1420,6 +1482,7 @@ } declare @llvm.riscv.vluxei.nxv8i8.nxv8i32( + , *, , i64); @@ -1433,6 +1496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i32( + undef, * %0, %1, i64 %2) @@ -1466,6 +1530,7 @@ } declare @llvm.riscv.vluxei.nxv16i8.nxv16i32( + , *, , i64); @@ -1479,6 +1544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i32( + undef, * %0, %1, i64 %2) @@ -1512,6 +1578,7 @@ } declare @llvm.riscv.vluxei.nxv1i16.nxv1i32( + , *, , i64); @@ -1525,6 +1592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i32( + undef, * %0, %1, i64 %2) @@ -1558,6 +1626,7 @@ } declare @llvm.riscv.vluxei.nxv2i16.nxv2i32( + , *, , i64); @@ -1571,6 +1640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i32( + undef, * %0, %1, i64 %2) @@ -1604,6 +1674,7 @@ } declare @llvm.riscv.vluxei.nxv4i16.nxv4i32( + , *, , i64); @@ -1617,6 +1688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i32( + undef, * %0, %1, i64 %2) @@ -1650,6 +1722,7 @@ } declare @llvm.riscv.vluxei.nxv8i16.nxv8i32( + , *, , i64); @@ -1663,6 +1736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i32( + undef, * %0, %1, i64 %2) @@ -1696,6 +1770,7 @@ } declare @llvm.riscv.vluxei.nxv16i16.nxv16i32( + , *, , i64); @@ -1709,6 +1784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i32( + undef, * %0, %1, i64 %2) @@ -1742,6 +1818,7 @@ } declare @llvm.riscv.vluxei.nxv1i32.nxv1i32( + , *, , i64); @@ -1754,6 +1831,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i32( + undef, * %0, %1, i64 %2) @@ -1787,6 +1865,7 @@ } declare @llvm.riscv.vluxei.nxv2i32.nxv2i32( + , *, , i64); @@ -1799,6 +1878,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i32( + undef, * %0, %1, i64 %2) @@ -1832,6 +1912,7 @@ } declare @llvm.riscv.vluxei.nxv4i32.nxv4i32( + , *, , i64); @@ -1844,6 +1925,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i32( + undef, * %0, %1, i64 %2) @@ -1877,6 +1959,7 @@ } declare @llvm.riscv.vluxei.nxv8i32.nxv8i32( + , *, , i64); @@ -1889,6 +1972,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i32( + undef, * %0, %1, i64 %2) @@ -1922,6 +2006,7 @@ } declare @llvm.riscv.vluxei.nxv16i32.nxv16i32( + , *, , i64); @@ -1934,6 +2019,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i32( + undef, * %0, %1, i64 %2) @@ -1967,6 +2053,7 @@ } declare @llvm.riscv.vluxei.nxv1i64.nxv1i32( + , *, , i64); @@ -1980,6 +2067,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i32( + undef, * %0, %1, i64 %2) @@ -2013,6 +2101,7 @@ } declare 
@llvm.riscv.vluxei.nxv2i64.nxv2i32( + , *, , i64); @@ -2026,6 +2115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i32( + undef, * %0, %1, i64 %2) @@ -2059,6 +2149,7 @@ } declare @llvm.riscv.vluxei.nxv4i64.nxv4i32( + , *, , i64); @@ -2072,6 +2163,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i32( + undef, * %0, %1, i64 %2) @@ -2105,6 +2197,7 @@ } declare @llvm.riscv.vluxei.nxv8i64.nxv8i32( + , *, , i64); @@ -2118,6 +2211,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i32( + undef, * %0, %1, i64 %2) @@ -2151,6 +2245,7 @@ } declare @llvm.riscv.vluxei.nxv1f16.nxv1i32( + , *, , i64); @@ -2164,6 +2259,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i32( + undef, * %0, %1, i64 %2) @@ -2197,6 +2293,7 @@ } declare @llvm.riscv.vluxei.nxv2f16.nxv2i32( + , *, , i64); @@ -2210,6 +2307,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i32( + undef, * %0, %1, i64 %2) @@ -2243,6 +2341,7 @@ } declare @llvm.riscv.vluxei.nxv4f16.nxv4i32( + , *, , i64); @@ -2256,6 +2355,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i32( + undef, * %0, %1, i64 %2) @@ -2289,6 +2389,7 @@ } declare @llvm.riscv.vluxei.nxv8f16.nxv8i32( + , *, , i64); @@ -2302,6 +2403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i32( + undef, * %0, %1, i64 %2) @@ -2335,6 +2437,7 @@ } declare @llvm.riscv.vluxei.nxv16f16.nxv16i32( + , *, , i64); @@ -2348,6 +2451,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i32( + undef, * %0, %1, i64 %2) @@ -2381,6 +2485,7 @@ } declare @llvm.riscv.vluxei.nxv1f32.nxv1i32( + , *, , i64); @@ -2393,6 +2498,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i32( + undef, * %0, %1, i64 %2) @@ -2426,6 +2532,7 @@ } declare @llvm.riscv.vluxei.nxv2f32.nxv2i32( + , *, , i64); @@ -2438,6 +2545,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i32( + undef, * %0, %1, i64 %2) @@ -2471,6 +2579,7 @@ } declare @llvm.riscv.vluxei.nxv4f32.nxv4i32( + , *, , i64); @@ -2483,6 +2592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i32( + undef, * %0, %1, i64 %2) @@ -2516,6 +2626,7 @@ } declare @llvm.riscv.vluxei.nxv8f32.nxv8i32( + , *, , i64); @@ -2528,6 +2639,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i32( + undef, * %0, %1, i64 %2) @@ -2561,6 +2673,7 @@ } declare @llvm.riscv.vluxei.nxv16f32.nxv16i32( + , *, , i64); @@ -2573,6 +2686,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i32( + undef, * %0, %1, i64 %2) @@ -2606,6 +2720,7 @@ } declare @llvm.riscv.vluxei.nxv1f64.nxv1i32( + , *, , i64); @@ -2619,6 +2734,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i32( + undef, * %0, %1, i64 %2) @@ -2652,6 +2768,7 @@ } declare @llvm.riscv.vluxei.nxv2f64.nxv2i32( + , *, , i64); @@ -2665,6 +2782,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i32( + undef, * %0, %1, i64 %2) @@ -2698,6 +2816,7 @@ } declare @llvm.riscv.vluxei.nxv4f64.nxv4i32( + , *, , i64); @@ -2711,6 +2830,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i32( + undef, * %0, %1, i64 %2) @@ -2744,6 +2864,7 @@ } declare @llvm.riscv.vluxei.nxv8f64.nxv8i32( + , *, , i64); @@ -2757,6 +2878,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i32( + undef, * %0, %1, i64 %2) @@ -2790,6 +2912,7 @@ } declare @llvm.riscv.vluxei.nxv1i8.nxv1i16( + , *, , i64); @@ -2803,6 +2926,7 @@ ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i16( + undef, * %0, %1, i64 %2) @@ -2836,6 +2960,7 @@ } declare @llvm.riscv.vluxei.nxv2i8.nxv2i16( + , *, , i64); @@ -2849,6 +2974,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i16( + undef, * %0, %1, i64 %2) @@ -2882,6 +3008,7 @@ } declare @llvm.riscv.vluxei.nxv4i8.nxv4i16( + , *, , i64); @@ -2895,6 +3022,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i16( + undef, * %0, %1, i64 %2) @@ -2928,6 +3056,7 @@ } declare @llvm.riscv.vluxei.nxv8i8.nxv8i16( + , *, , i64); @@ -2941,6 +3070,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i16( + undef, * %0, %1, i64 %2) @@ -2974,6 +3104,7 @@ } declare @llvm.riscv.vluxei.nxv16i8.nxv16i16( + , *, , i64); @@ -2987,6 +3118,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i16( + undef, * %0, %1, i64 %2) @@ -3020,6 +3152,7 @@ } declare @llvm.riscv.vluxei.nxv32i8.nxv32i16( + , *, , i64); @@ -3033,6 +3166,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i16( + undef, * %0, %1, i64 %2) @@ -3066,6 +3200,7 @@ } declare @llvm.riscv.vluxei.nxv1i16.nxv1i16( + , *, , i64); @@ -3078,6 +3213,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i16( + undef, * %0, %1, i64 %2) @@ -3111,6 +3247,7 @@ } declare @llvm.riscv.vluxei.nxv2i16.nxv2i16( + , *, , i64); @@ -3123,6 +3260,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i16( + undef, * %0, %1, i64 %2) @@ -3156,6 +3294,7 @@ } declare @llvm.riscv.vluxei.nxv4i16.nxv4i16( + , *, , i64); @@ -3168,6 +3307,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i16( + undef, * %0, %1, i64 %2) @@ -3201,6 +3341,7 @@ } declare @llvm.riscv.vluxei.nxv8i16.nxv8i16( + , *, , i64); @@ -3213,6 +3354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i16( + undef, * %0, %1, i64 %2) @@ -3246,6 +3388,7 @@ } declare @llvm.riscv.vluxei.nxv16i16.nxv16i16( + , *, , i64); @@ -3258,6 +3401,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i16( + undef, * %0, %1, i64 %2) @@ -3291,6 +3435,7 @@ } declare @llvm.riscv.vluxei.nxv32i16.nxv32i16( + , *, , i64); @@ -3303,6 +3448,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i16( + undef, * %0, %1, i64 %2) @@ -3336,6 +3482,7 @@ } declare @llvm.riscv.vluxei.nxv1i32.nxv1i16( + , *, , i64); @@ -3349,6 +3496,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i16( + undef, * %0, %1, i64 %2) @@ -3382,6 +3530,7 @@ } declare @llvm.riscv.vluxei.nxv2i32.nxv2i16( + , *, , i64); @@ -3395,6 +3544,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i16( + undef, * %0, %1, i64 %2) @@ -3428,6 +3578,7 @@ } declare @llvm.riscv.vluxei.nxv4i32.nxv4i16( + , *, , i64); @@ -3441,6 +3592,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i16( + undef, * %0, %1, i64 %2) @@ -3474,6 +3626,7 @@ } declare @llvm.riscv.vluxei.nxv8i32.nxv8i16( + , *, , i64); @@ -3487,6 +3640,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i16( + undef, * %0, %1, i64 %2) @@ -3520,6 +3674,7 @@ } declare @llvm.riscv.vluxei.nxv16i32.nxv16i16( + , *, , i64); @@ -3533,6 +3688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i16( + undef, * %0, %1, i64 %2) @@ -3566,6 +3722,7 @@ } declare @llvm.riscv.vluxei.nxv1i64.nxv1i16( + , *, , i64); @@ -3579,6 +3736,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i16( + undef, * %0, %1, i64 %2) @@ -3612,6 
+3770,7 @@ } declare @llvm.riscv.vluxei.nxv2i64.nxv2i16( + , *, , i64); @@ -3625,6 +3784,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i16( + undef, * %0, %1, i64 %2) @@ -3658,6 +3818,7 @@ } declare @llvm.riscv.vluxei.nxv4i64.nxv4i16( + , *, , i64); @@ -3671,6 +3832,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i16( + undef, * %0, %1, i64 %2) @@ -3704,6 +3866,7 @@ } declare @llvm.riscv.vluxei.nxv8i64.nxv8i16( + , *, , i64); @@ -3717,6 +3880,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i16( + undef, * %0, %1, i64 %2) @@ -3750,6 +3914,7 @@ } declare @llvm.riscv.vluxei.nxv1f16.nxv1i16( + , *, , i64); @@ -3762,6 +3927,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i16( + undef, * %0, %1, i64 %2) @@ -3795,6 +3961,7 @@ } declare @llvm.riscv.vluxei.nxv2f16.nxv2i16( + , *, , i64); @@ -3807,6 +3974,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i16( + undef, * %0, %1, i64 %2) @@ -3840,6 +4008,7 @@ } declare @llvm.riscv.vluxei.nxv4f16.nxv4i16( + , *, , i64); @@ -3852,6 +4021,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i16( + undef, * %0, %1, i64 %2) @@ -3885,6 +4055,7 @@ } declare @llvm.riscv.vluxei.nxv8f16.nxv8i16( + , *, , i64); @@ -3897,6 +4068,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i16( + undef, * %0, %1, i64 %2) @@ -3930,6 +4102,7 @@ } declare @llvm.riscv.vluxei.nxv16f16.nxv16i16( + , *, , i64); @@ -3942,6 +4115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i16( + undef, * %0, %1, i64 %2) @@ -3975,6 +4149,7 @@ } declare @llvm.riscv.vluxei.nxv32f16.nxv32i16( + , *, , i64); @@ -3987,6 +4162,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i16( + undef, * %0, %1, i64 %2) @@ -4020,6 +4196,7 @@ } declare @llvm.riscv.vluxei.nxv1f32.nxv1i16( + , *, , i64); @@ -4033,6 +4210,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i16( + undef, * %0, %1, i64 %2) @@ -4066,6 +4244,7 @@ } declare @llvm.riscv.vluxei.nxv2f32.nxv2i16( + , *, , i64); @@ -4079,6 +4258,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i16( + undef, * %0, %1, i64 %2) @@ -4112,6 +4292,7 @@ } declare @llvm.riscv.vluxei.nxv4f32.nxv4i16( + , *, , i64); @@ -4125,6 +4306,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i16( + undef, * %0, %1, i64 %2) @@ -4158,6 +4340,7 @@ } declare @llvm.riscv.vluxei.nxv8f32.nxv8i16( + , *, , i64); @@ -4171,6 +4354,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i16( + undef, * %0, %1, i64 %2) @@ -4204,6 +4388,7 @@ } declare @llvm.riscv.vluxei.nxv16f32.nxv16i16( + , *, , i64); @@ -4217,6 +4402,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i16( + undef, * %0, %1, i64 %2) @@ -4250,6 +4436,7 @@ } declare @llvm.riscv.vluxei.nxv1f64.nxv1i16( + , *, , i64); @@ -4263,6 +4450,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i16( + undef, * %0, %1, i64 %2) @@ -4296,6 +4484,7 @@ } declare @llvm.riscv.vluxei.nxv2f64.nxv2i16( + , *, , i64); @@ -4309,6 +4498,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i16( + undef, * %0, %1, i64 %2) @@ -4342,6 +4532,7 @@ } declare @llvm.riscv.vluxei.nxv4f64.nxv4i16( + , *, , i64); @@ -4355,6 +4546,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i16( + undef, * %0, %1, i64 %2) @@ -4388,6 +4580,7 @@ } declare @llvm.riscv.vluxei.nxv8f64.nxv8i16( + , *, , i64); @@ -4401,6 +4594,7 
@@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i16( + undef, * %0, %1, i64 %2) @@ -4434,6 +4628,7 @@ } declare @llvm.riscv.vluxei.nxv1i8.nxv1i8( + , *, , i64); @@ -4446,6 +4641,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i8( + undef, * %0, %1, i64 %2) @@ -4479,6 +4675,7 @@ } declare @llvm.riscv.vluxei.nxv2i8.nxv2i8( + , *, , i64); @@ -4491,6 +4688,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i8( + undef, * %0, %1, i64 %2) @@ -4524,6 +4722,7 @@ } declare @llvm.riscv.vluxei.nxv4i8.nxv4i8( + , *, , i64); @@ -4536,6 +4735,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i8( + undef, * %0, %1, i64 %2) @@ -4569,6 +4769,7 @@ } declare @llvm.riscv.vluxei.nxv8i8.nxv8i8( + , *, , i64); @@ -4581,6 +4782,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i8( + undef, * %0, %1, i64 %2) @@ -4614,6 +4816,7 @@ } declare @llvm.riscv.vluxei.nxv16i8.nxv16i8( + , *, , i64); @@ -4626,6 +4829,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i8( + undef, * %0, %1, i64 %2) @@ -4659,6 +4863,7 @@ } declare @llvm.riscv.vluxei.nxv32i8.nxv32i8( + , *, , i64); @@ -4671,6 +4876,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i8( + undef, * %0, %1, i64 %2) @@ -4704,6 +4910,7 @@ } declare @llvm.riscv.vluxei.nxv64i8.nxv64i8( + , *, , i64); @@ -4716,6 +4923,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv64i8.nxv64i8( + undef, * %0, %1, i64 %2) @@ -4749,6 +4957,7 @@ } declare @llvm.riscv.vluxei.nxv1i16.nxv1i8( + , *, , i64); @@ -4762,6 +4971,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i8( + undef, * %0, %1, i64 %2) @@ -4795,6 +5005,7 @@ } declare @llvm.riscv.vluxei.nxv2i16.nxv2i8( + , *, , i64); @@ -4808,6 +5019,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i8( + undef, * %0, %1, i64 %2) @@ -4841,6 +5053,7 @@ } declare @llvm.riscv.vluxei.nxv4i16.nxv4i8( + , *, , i64); @@ -4854,6 +5067,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i8( + undef, * %0, %1, i64 %2) @@ -4887,6 +5101,7 @@ } declare @llvm.riscv.vluxei.nxv8i16.nxv8i8( + , *, , i64); @@ -4900,6 +5115,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i8( + undef, * %0, %1, i64 %2) @@ -4933,6 +5149,7 @@ } declare @llvm.riscv.vluxei.nxv16i16.nxv16i8( + , *, , i64); @@ -4946,6 +5163,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i8( + undef, * %0, %1, i64 %2) @@ -4979,6 +5197,7 @@ } declare @llvm.riscv.vluxei.nxv32i16.nxv32i8( + , *, , i64); @@ -4992,6 +5211,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i8( + undef, * %0, %1, i64 %2) @@ -5025,6 +5245,7 @@ } declare @llvm.riscv.vluxei.nxv1i32.nxv1i8( + , *, , i64); @@ -5038,6 +5259,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i8( + undef, * %0, %1, i64 %2) @@ -5071,6 +5293,7 @@ } declare @llvm.riscv.vluxei.nxv2i32.nxv2i8( + , *, , i64); @@ -5084,6 +5307,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i8( + undef, * %0, %1, i64 %2) @@ -5117,6 +5341,7 @@ } declare @llvm.riscv.vluxei.nxv4i32.nxv4i8( + , *, , i64); @@ -5130,6 +5355,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i8( + undef, * %0, %1, i64 %2) @@ -5163,6 +5389,7 @@ } declare @llvm.riscv.vluxei.nxv8i32.nxv8i8( + , *, , i64); @@ -5176,6 +5403,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i8( + undef, * %0, %1, i64 %2) @@ -5209,6 +5437,7 @@ } 
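Note: every vloxei/vluxei hunk above and below follows the same mechanical pattern: the unmasked intrinsic's declaration gains a leading passthru parameter of the result vector type, and each unmasked call passes undef for it. The scalable-vector types were dropped when this diff was rendered, but they are implied by the mangled intrinsic names. As an illustrative reconstruction only (not verbatim from the patch; the wrapper @example_vluxei is hypothetical), the nxv16i32/nxv16i8 variant declared just below now reads:

declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,    ; new leading passthru operand (same type as the result)
  <vscale x 16 x i32>*,   ; base address
  <vscale x 16 x i8>,     ; index vector
  i64)                    ; vl

define <vscale x 16 x i32> @example_vluxei(<vscale x 16 x i32>* %base, <vscale x 16 x i8> %bindex, i64 %vl) {
entry:
  ; Unmasked form: pass undef as the passthru, as these tests now do.
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32>* %base,
    <vscale x 16 x i8> %bindex,
    i64 %vl)
  ret <vscale x 16 x i32> %a
}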
declare @llvm.riscv.vluxei.nxv16i32.nxv16i8( + , *, , i64); @@ -5222,6 +5451,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i8( + undef, * %0, %1, i64 %2) @@ -5255,6 +5485,7 @@ } declare @llvm.riscv.vluxei.nxv1i64.nxv1i8( + , *, , i64); @@ -5268,6 +5499,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i8( + undef, * %0, %1, i64 %2) @@ -5301,6 +5533,7 @@ } declare @llvm.riscv.vluxei.nxv2i64.nxv2i8( + , *, , i64); @@ -5314,6 +5547,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i8( + undef, * %0, %1, i64 %2) @@ -5347,6 +5581,7 @@ } declare @llvm.riscv.vluxei.nxv4i64.nxv4i8( + , *, , i64); @@ -5360,6 +5595,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i8( + undef, * %0, %1, i64 %2) @@ -5393,6 +5629,7 @@ } declare @llvm.riscv.vluxei.nxv8i64.nxv8i8( + , *, , i64); @@ -5406,6 +5643,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i8( + undef, * %0, %1, i64 %2) @@ -5439,6 +5677,7 @@ } declare @llvm.riscv.vluxei.nxv1f16.nxv1i8( + , *, , i64); @@ -5452,6 +5691,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i8( + undef, * %0, %1, i64 %2) @@ -5485,6 +5725,7 @@ } declare @llvm.riscv.vluxei.nxv2f16.nxv2i8( + , *, , i64); @@ -5498,6 +5739,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i8( + undef, * %0, %1, i64 %2) @@ -5531,6 +5773,7 @@ } declare @llvm.riscv.vluxei.nxv4f16.nxv4i8( + , *, , i64); @@ -5544,6 +5787,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i8( + undef, * %0, %1, i64 %2) @@ -5577,6 +5821,7 @@ } declare @llvm.riscv.vluxei.nxv8f16.nxv8i8( + , *, , i64); @@ -5590,6 +5835,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i8( + undef, * %0, %1, i64 %2) @@ -5623,6 +5869,7 @@ } declare @llvm.riscv.vluxei.nxv16f16.nxv16i8( + , *, , i64); @@ -5636,6 +5883,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i8( + undef, * %0, %1, i64 %2) @@ -5669,6 +5917,7 @@ } declare @llvm.riscv.vluxei.nxv32f16.nxv32i8( + , *, , i64); @@ -5682,6 +5931,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i8( + undef, * %0, %1, i64 %2) @@ -5715,6 +5965,7 @@ } declare @llvm.riscv.vluxei.nxv1f32.nxv1i8( + , *, , i64); @@ -5728,6 +5979,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i8( + undef, * %0, %1, i64 %2) @@ -5761,6 +6013,7 @@ } declare @llvm.riscv.vluxei.nxv2f32.nxv2i8( + , *, , i64); @@ -5774,6 +6027,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i8( + undef, * %0, %1, i64 %2) @@ -5807,6 +6061,7 @@ } declare @llvm.riscv.vluxei.nxv4f32.nxv4i8( + , *, , i64); @@ -5820,6 +6075,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i8( + undef, * %0, %1, i64 %2) @@ -5853,6 +6109,7 @@ } declare @llvm.riscv.vluxei.nxv8f32.nxv8i8( + , *, , i64); @@ -5866,6 +6123,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i8( + undef, * %0, %1, i64 %2) @@ -5899,6 +6157,7 @@ } declare @llvm.riscv.vluxei.nxv16f32.nxv16i8( + , *, , i64); @@ -5912,6 +6171,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i8( + undef, * %0, %1, i64 %2) @@ -5945,6 +6205,7 @@ } declare @llvm.riscv.vluxei.nxv1f64.nxv1i8( + , *, , i64); @@ -5958,6 +6219,7 @@ ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i8( + undef, * %0, %1, i64 %2) @@ -5991,6 +6253,7 @@ } declare @llvm.riscv.vluxei.nxv2f64.nxv2i8( + , *, , i64); @@ -6004,6 +6267,7 @@ ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vluxei.nxv2f64.nxv2i8(
+    <vscale x 2 x double> undef,
     <vscale x 2 x double>* %0,
     <vscale x 2 x i8> %1,
     i64 %2)
@@ -6037,6 +6301,7 @@
 }

 declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i8>,
   i64);
@@ -6050,6 +6315,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
+    <vscale x 4 x double> undef,
     <vscale x 4 x double>* %0,
     <vscale x 4 x i8> %1,
     i64 %2)
@@ -6083,6 +6349,7 @@
 }

 declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
   i64);
@@ -6096,6 +6363,7 @@
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
+    <vscale x 8 x double> undef,
     <vscale x 8 x double>* %0,
     <vscale x 8 x i8> %1,
     i64 %2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -475,10 +475,10 @@
   %x.addr.015 = phi float* [ %add.ptr, %for.body ], [ %x, %entry ]
   %y.addr.014 = phi float* [ %add.ptr1, %for.body ], [ %y, %entry ]
   %2 = bitcast float* %x.addr.015 to <vscale x 16 x float>*
-  %3 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>* %2, i64 %1)
+  %3 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float>* %2, i64 %1)
   %add.ptr = getelementptr inbounds float, float* %x.addr.015, i64 %1
   %4 = bitcast float* %y.addr.014 to <vscale x 16 x float>*
-  %5 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>* %4, i64 %1)
+  %5 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float>* %4, i64 %1)
   %6 = tail call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> %5, float %a, <vscale x 16 x float> %3, i64 %1)
   tail call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> %6, <vscale x 16 x float>* %4, i64 %1)
   %add.ptr1 = getelementptr inbounds float, float* %y.addr.014, i64 %1
@@ -492,7 +492,7 @@
 }

 declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
-declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>* nocapture, i64)
+declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>, <vscale x 16 x float>* nocapture, i64)
 declare <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float>, float, <vscale x 16 x float>, i64)
 declare void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float>, <vscale x 16 x float>* nocapture, i64)
@@ -515,11 +515,11 @@
 ; CHECK-NEXT:    vadd.vv v8, v9, v8
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32>* %x, i64 %vl)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %x, i64 %vl)
   br i1 %cond, label %if, label %if.end

 if:
-  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16>* %y, i64 %vl)
+  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %y, i64 %vl)
   %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i16> %b, i16 0, i64 %vl)
   br label %if.end
@@ -528,8 +528,8 @@
   %e = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %d, i64 %vl)
   ret <vscale x 2 x i32> %e
 }
-declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32>*, i64)
-declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16>*, i64)
+declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i64)
+declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, i64)
 declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32(<vscale x 2 x i16>, i16, i64)
 declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i64)
@@ -561,11 +561,11 @@
 ; CHECK-NEXT:    vadd.vv v8, v9, v8
 ; CHECK-NEXT:    ret
 entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32>* %x, i64 %vl)
+  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %x, i64 %vl)
   br i1 %cond, label %if, label %if.end

 if:
-  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16>* %y, i64 %vl)
+  %b = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %y, i64 %vl)
   %c = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32> %a, <vscale x 2 x i16> %b, i64 %vl)
   br label %if.end
@@ -574,7 +574,7 @@
   br i1 %cond2, label %if2, label %if2.end

 if2:
-  %e = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16>* %z, i64 %vl)
+  %e = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16>* %z, i64 %vl)
   %f = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(<vscale x 2 x i32> %d, <vscale x 2 x i16> %e, i64 %vl)
   br label %if2.end
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -10,7 +10,7 @@
   define <vscale x 1 x i64> @load_add_or_sub(i8 zeroext %cond, <vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
   entry:
-    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* %0, i64 %2)
+    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
     %tobool = icmp eq i8 %cond, 0
     br i1 %tobool, label %if.else, label %if.then
@@ -29,7 +29,7 @@
   define void @load_zext_or_sext(i8 zeroext %cond, <vscale x 1 x i32>* %0, <vscale x 1 x i64>* %1, i64 %2) #0 {
   entry:
-    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>* %0, i64 %2)
+    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* %0, i64 %2)
     %tobool = icmp eq i8 %cond, 0
     br i1 %tobool, label %if.else, label %if.then
@@ -102,10 +102,10 @@
   declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

   ; Function Attrs: nounwind readonly
-  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* nocapture, i64) #3
+  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #3

   ; Function Attrs: nounwind readonly
-  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>* nocapture, i64) #3
+  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #3

   ; Function Attrs: nounwind writeonly
   declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -131,7 +131,7 @@
   %i.012 = phi i64 [ %add, %for.body ], [ 0, %entry ]
   %add.ptr = getelementptr inbounds i32, i32* %A, i64 %i.012
   %2 = bitcast i32* %add.ptr to <vscale x 2 x i32>*
-  %3 = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>* %2, i64 %1)
+  %3 = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %2, i64 %1)
   %4 = tail call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32> %3, i32 -2, i64 %1)
   %5 = tail call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32> %3, i32 2, i64 %1)
   %6 = tail call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1> %4, <vscale x 2 x i1> %5, i64 %1)
@@ -280,7 +280,7 @@
   i64)
 declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
-declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>* nocapture, i64)
+declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.i64(<vscale x 2 x i32>, <vscale x 2 x i32>* nocapture, i64)
 declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)
 declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32>, i32, i64)
 declare <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1>, <vscale x 2 x i1>, i64)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -16,14 +16,14 @@
   define <vscale x 1 x i64> @load_add(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
   entry:
-    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* %0, i64 %2)
+    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
     %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
     ret <vscale x 1 x i64> %b
   }

   define <vscale x 1 x i64> @load_zext(<vscale x 1 x i32>* %0, i64 %1) #0 {
   entry:
-    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>* %0, i64 %1)
+    %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* %0, i64 %1)
     %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> %a, i64 %1)
     ret <vscale x 1 x i64> %b
   }
@@ -66,7 +66,7 @@
   define <vscale x 1 x i64> @load_add_inlineasm(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
   entry:
-    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* %0, i64 %2)
+    %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
     call void asm sideeffect "", ""()
     %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
     ret <vscale x 1 x i64> %b
@@ -76,10 +76,10 @@
   declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1

   ; Function Attrs: nounwind readonly
-  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* nocapture, i64) #4
+  declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4

   ; Function Attrs: nounwind readonly
-  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>* nocapture, i64) #4
+  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #4

   ; Function Attrs: nounwind readnone
   declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32>, i64) #1
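The vsetvli-insert tests show the same change for the unit-stride vle intrinsic: the declaration takes a new leading passthru parameter, and every existing unmasked call now passes undef for it, meaning "no merge source", so the compiler remains free to pick any destination register as before. A minimal before/after sketch, assuming the nxv1i64 variant used above (the @example_vle wrapper is hypothetical and not part of the patch):

; Old unmasked form (before this patch):
;   %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>* %p, i64 %vl)

declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64)

define <vscale x 1 x i64> @example_vle(<vscale x 1 x i64>* %p, i64 %vl) {
entry:
  ; New form: the first operand is the passthru; undef requests no merge value.
  %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64>* %p,
    i64 %vl)
  ret <vscale x 1 x i64> %a
}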