diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2197,7 +2197,7 @@
 let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc",
     MaskedPolicy = NonePolicy, ManualCodegen = [{ {
       ID = Intrinsic::experimental_vector_extract;
-      IntrinsicTypes = {ResultType, Ops[0]->getType()};
+      IntrinsicTypes = {ResultType, Ops[0]->getType(), Int64Ty};
       Ops.push_back(ConstantInt::get(Int64Ty, 0));
       return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
     } }] in {
@@ -2243,7 +2243,7 @@
       Ops[1] = Builder.CreateMul(Ops[1],
                                  ConstantInt::get(Ops[1]->getType(),
                                                   VecTy->getMinNumElements()));
-      IntrinsicTypes = {ResultType, Ops[0]->getType()};
+      IntrinsicTypes = {ResultType, Ops[0]->getType(), Ops[1]->getType()};
       return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
     } }] in {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c
@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8.i64( [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
@@ -16,7 +16,7 @@
 // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8.i64( [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
@@ -25,7 +25,7 @@
 // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8.i64( [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
@@ -34,7 +34,7 @@
 // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8.i64( [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
@@ -43,7 +43,7 @@
 // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8.i64( [[SRC:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
@@ -52,7 +52,7 @@
 // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call
@llvm.experimental.vector.extract.nxv32i8.nxv64i8.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) { @@ -133,7 +133,7 
@@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2i32.nxv4i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) { @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) { @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) { @@ -475,7 +475,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) { @@ -484,7 +484,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) { @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) { @@ -502,7 +502,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) { @@ -511,7 +511,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) { @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c @@ -1087,7 +1087,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { @@ -1096,7 +1096,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { @@ -1105,7 +1105,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { @@ -1114,7 +1114,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { @@ -1123,7 +1123,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { @@ -1132,7 +1132,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { @@ -1141,7 +1141,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { @@ -1150,7 +1150,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { @@ -1159,7 +1159,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { @@ -1168,7 +1168,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { @@ -1177,7 +1177,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { @@ -1186,7 +1186,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { @@ -1195,7 +1195,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { @@ -1204,7 +1204,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { @@ -1213,7 +1213,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { @@ -1222,7 +1222,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { @@ -1231,7 +1231,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { @@ -1240,7 +1240,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { @@ -1249,7 +1249,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { @@ -1258,7 +1258,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { @@ -1267,7 +1267,7 
@@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { @@ -1276,7 +1276,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { @@ -1285,7 +1285,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { @@ -1294,7 +1294,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { @@ -1303,7 +1303,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { @@ -1312,7 +1312,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { @@ -1321,7 +1321,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { @@ -1330,7 +1330,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { @@ -1348,7 +1348,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { @@ -1357,7 +1357,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { @@ -1366,7 +1366,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { @@ -1375,7 +1375,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { @@ -1384,7 +1384,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { @@ -1393,7 +1393,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { @@ -1402,7 +1402,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { @@ -1411,7 +1411,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.experimental.vector.extract.nxv1i32.nxv2i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { @@ -1420,7 +1420,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { @@ -1429,7 +1429,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { @@ -1438,7 +1438,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { @@ -1447,7 +1447,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { @@ -1456,7 +1456,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { @@ -1465,7 +1465,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { @@ -1474,7 +1474,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { @@ -1483,7 +1483,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { @@ -1492,7 +1492,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { @@ -1501,7 +1501,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { @@ -1510,7 +1510,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { @@ -1519,7 +1519,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { @@ -1528,7 +1528,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { @@ -1537,7 +1537,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { @@ -1546,7 +1546,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { @@ -1555,7 +1555,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { @@ -1564,7 +1564,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { @@ -1573,7 +1573,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { @@ -1582,7 +1582,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { @@ -1591,7 +1591,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { @@ -1600,7 +1600,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { @@ -1609,7 +1609,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { @@ -1618,7 +1618,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { @@ -1627,7 +1627,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { @@ -1636,7 +1636,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.experimental.vector.extract.nxv8i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { @@ -1645,7 +1645,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { @@ -1654,7 +1654,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { @@ -1663,7 +1663,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { @@ -1672,7 +1672,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { @@ -1681,7 +1681,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { @@ -1690,7 +1690,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { @@ -1699,7 +1699,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { @@ -1708,7 +1708,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { @@ -1717,7 +1717,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { @@ -1726,7 +1726,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { @@ -1735,7 +1735,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { @@ -1744,7 +1744,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { @@ -1753,7 +1753,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { @@ -1762,7 +1762,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { @@ -1771,7 +1771,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { @@ -1780,7 +1780,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { @@ -1789,7 +1789,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { @@ -1798,7 +1798,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { @@ -1807,7 +1807,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { @@ -1816,7 +1816,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { @@ -1825,7 +1825,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { @@ -1834,7 +1834,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { @@ -1843,7 +1843,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { @@ -1852,7 +1852,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { @@ -1861,7 +1861,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { @@ -1870,7 +1870,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { @@ -1879,7 +1879,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { @@ -1888,7 +1888,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { @@ -1897,7 +1897,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { @@ -1906,7 +1906,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { @@ -1915,7 +1915,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { @@ -1924,7 +1924,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { @@ -1933,7 +1933,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.experimental.vector.extract.nxv1i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { @@ -1942,7 +1942,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { @@ -1951,7 +1951,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { @@ -1960,7 +1960,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { @@ -1969,7 +1969,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { @@ -1978,7 +1978,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { @@ -1987,7 +1987,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { @@ -1996,7 +1996,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { @@ -2005,7 +2005,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { @@ -2014,7 +2014,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { @@ -2023,7 +2023,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { @@ -2032,7 +2032,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { @@ -2041,7 +2041,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { @@ -2050,7 +2050,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { @@ -2059,7 +2059,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { @@ -2068,7 +2068,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { @@ -2077,7 +2077,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { @@ -2086,7 +2086,7 
@@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { @@ -2095,7 +2095,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { @@ -2104,7 +2104,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { @@ -2113,7 +2113,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { @@ -2122,7 +2122,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { @@ -2131,7 +2131,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { @@ -2140,7 +2140,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { @@ -2149,7 +2149,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { @@ -2158,7 +2158,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8.i64( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8.i64( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8.i64( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8.i64( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8.i64( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8.i64( [[SRC:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( 
[[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16.i64( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16.i64( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16.i64( [[SRC:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16.i64( [[SRC:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16.i64( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32.i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32.i64( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src) { @@ -152,7 +152,7 @@ // 
CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32.i64( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64.i64( [[SRC:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64.i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64.i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64.i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64.i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv8i8.nxv16i8.i64( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8.i64( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8.i64( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8.i64( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8.i64( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8.i64( [[SRC:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16.i64( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16.i64( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src) { @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16.i64( [[SRC:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src) { @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16.i64( [[SRC:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src) { @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16.i64( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src) { @@ -332,7 +332,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32.i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src) { @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32.i64( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32.i64( [[SRC:%.*]], i64 8) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64.i64( [[SRC:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64.i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64.i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64.i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64.i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32.i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32.i64( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src) { @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32.i64( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src) { @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32.i64( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src) { @@ -494,7 +494,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64.i64( [[SRC:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src) { @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64.i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src) { @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64.i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src) { @@ -521,7 +521,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64.i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64.i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64.i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m2_f16m1 (vfloat16m2_t src) { @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m4_f16m1 (vfloat16m4_t src) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m8_f16m1 (vfloat16m8_t src) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vget_v_f16m4_f16m2 (vfloat16m4_t src) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vget_v_f16m8_f16m2 (vfloat16m8_t src) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16.i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vget_v_f16m8_f16m4 (vfloat16m8_t src) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c @@ -1088,7 +1088,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { @@ -1097,7 +1097,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { @@ -1106,7 +1106,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { @@ -1115,7 +1115,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { @@ -1124,7 +1124,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { @@ -1133,7 +1133,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { @@ -1142,7 +1142,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { @@ -1151,7 +1151,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { @@ -1160,7 +1160,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { @@ -1169,7 +1169,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { @@ -1178,7 +1178,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { @@ -1187,7 +1187,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { @@ -1196,7 +1196,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { @@ -1205,7 +1205,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { @@ -1214,7 +1214,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { @@ -1223,7 +1223,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { @@ -1232,7 +1232,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { @@ -1241,7 +1241,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv4i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { @@ -1250,7 +1250,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { @@ -1259,7 +1259,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { @@ -1268,7 +1268,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { @@ -1277,7 +1277,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { @@ -1286,7 +1286,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { @@ -1295,7 +1295,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { @@ -1304,7 +1304,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { @@ -1313,7 +1313,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { @@ -1322,7 +1322,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { @@ -1331,7 +1331,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { @@ -1340,7 +1340,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { @@ -1349,7 +1349,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { @@ -1358,7 +1358,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { @@ -1367,7 +1367,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { @@ -1376,7 +1376,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { @@ -1385,7 +1385,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { @@ -1394,7 +1394,7 @@ // CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_i16m8_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { @@ -1403,7 +1403,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { @@ -1412,7 +1412,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { @@ -1421,7 +1421,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { @@ -1430,7 +1430,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { @@ -1439,7 +1439,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { @@ -1448,7 +1448,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { @@ -1457,7 +1457,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { @@ -1466,7 +1466,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { @@ -1475,7 +1475,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { @@ -1484,7 +1484,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { @@ -1493,7 +1493,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { @@ -1502,7 +1502,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { @@ -1511,7 +1511,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { @@ -1520,7 +1520,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { @@ -1529,7 +1529,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { @@ -1538,7 +1538,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { @@ -1547,7 +1547,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { @@ -1556,7 +1556,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { @@ -1565,7 +1565,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { @@ -1574,7 +1574,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { @@ -1583,7 +1583,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { @@ -1592,7 +1592,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { @@ -1601,7 +1601,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { @@ -1610,7 +1610,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t 
test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { @@ -1619,7 +1619,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { @@ -1628,7 +1628,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { @@ -1637,7 +1637,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { @@ -1646,7 +1646,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { @@ -1655,7 +1655,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { @@ -1664,7 +1664,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { @@ -1673,7 +1673,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { @@ -1682,7 +1682,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { @@ -1691,7 +1691,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { @@ -1700,7 +1700,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { @@ -1709,7 +1709,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { @@ -1718,7 +1718,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { @@ -1727,7 +1727,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { @@ -1736,7 +1736,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { @@ -1745,7 +1745,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { @@ -1754,7 +1754,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { @@ -1763,7 +1763,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.experimental.vector.extract.nxv2i16.nxv4i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { @@ -1772,7 +1772,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { @@ -1781,7 +1781,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { @@ -1790,7 +1790,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { @@ -1799,7 +1799,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { @@ -1808,7 +1808,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { @@ -1817,7 +1817,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { @@ -1826,7 +1826,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { @@ -1835,7 +1835,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { @@ -1844,7 +1844,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { @@ -1853,7 +1853,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { @@ -1862,7 +1862,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { @@ -1871,7 +1871,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { @@ -1880,7 +1880,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { @@ -1889,7 +1889,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { @@ -1898,7 +1898,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { @@ -1907,7 +1907,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { @@ 
-1916,7 +1916,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { @@ -1925,7 +1925,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { @@ -1934,7 +1934,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { @@ -1943,7 +1943,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { @@ -1952,7 +1952,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { @@ -1961,7 +1961,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { @@ -1970,7 +1970,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { @@ -1979,7 +1979,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { @@ -1988,7 +1988,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { @@ -1997,7 +1997,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { @@ -2006,7 +2006,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { @@ -2015,7 +2015,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { @@ -2024,7 +2024,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { @@ -2033,7 +2033,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { @@ -2042,7 +2042,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { @@ -2051,7 +2051,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { @@ -2060,7 +2060,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { @@ -2069,7 +2069,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { @@ -2078,7 +2078,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { @@ -2087,7 +2087,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { @@ -2096,7 +2096,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { @@ -2105,7 +2105,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { @@ -2114,7 +2114,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { @@ -2123,7 +2123,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { @@ -2132,7 +2132,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2f64.nxv4f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { @@ -2141,7 +2141,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { @@ -2150,7 +2150,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { @@ -2159,7 +2159,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { @@ -2303,7 +2303,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv2f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4 (vfloat16mf2_t op1) { @@ -2312,7 +2312,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv4f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4 (vfloat16m1_t op1) { @@ -2321,7 +2321,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv4f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2 (vfloat16m1_t op1) { @@ -2330,7 +2330,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv8f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4 (vfloat16m2_t op1) { @@ -2339,7 +2339,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv8f16.i64( [[OP1:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2 (vfloat16m2_t op1) { @@ -2348,7 +2348,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1 (vfloat16m2_t op1) { @@ -2357,7 +2357,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv16f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4 (vfloat16m4_t op1) { @@ -2366,7 +2366,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2 (vfloat16m4_t op1) { @@ -2375,7 +2375,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1 (vfloat16m4_t op1) { @@ -2384,7 +2384,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2 (vfloat16m4_t op1) { @@ -2393,7 +2393,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv32f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4 (vfloat16m8_t op1) { @@ -2402,7 +2402,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv32f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2 (vfloat16m8_t op1) { @@ -2411,7 +2411,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t 
test_vlmul_trunc_v_f16m8_f16m1 (vfloat16m8_t op1) { @@ -2420,7 +2420,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2 (vfloat16m8_t op1) { @@ -2429,7 +2429,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16.i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4 (vfloat16m8_t op1) { diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c --- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c +++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c @@ -55,7 +55,7 @@ // CHECK128-NEXT: [[TMP0:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) // CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[X:%.*]], i64 0) // CHECK128-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.asrd.nxv16i8( [[TMP0]], [[CASTSCALABLESVE]], i32 1) -// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[TMP1]], i64 0) +// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8.i64( [[TMP1]], i64 0) // CHECK128-NEXT: ret <16 x i8> [[CASTFIXEDSVE]] // CHECK-LABEL: define{{.*}} void @f2( @@ -65,7 +65,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8( undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = call @llvm.aarch64.sve.asrd.nxv16i8( [[TMP1]], [[CASTSCALABLESVE]], i32 1) -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[TMP2]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8.i64( [[TMP2]], i64 0) // CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[CASTFIXEDSVE]], <[[#div(VBITS,8)]] x i8>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]] // CHECK-NEXT: ret void vec_int8 f2(vec_int8 x) { return svasrd_x(svptrue_b8(), x, 1); } @@ -80,14 +80,14 @@ // CHECK128-LABEL: define{{.*}} void @g( noundef %x.coerce) // CHECK128-NEXT: entry: -// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[X_COERCE:%.*]], i64 0) +// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8.i64( [[X_COERCE:%.*]], i64 0) // CHECK128-NEXT: call void @f3(<16 x i8> noundef [[X]]) [[ATTR5:#.*]] // CHECK128-NEXT: ret void // CHECK-LABEL: define{{.*}} void @g( noundef %x.coerce) // CHECK-NEXT: entry: // CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16 -// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[X_COERCE:%.*]], i64 0) +// 
CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8.i64( [[X_COERCE:%.*]], i64 0) // CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]] // CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]] // CHECK-NEXT: ret void diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp --- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp +++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp @@ -49,8 +49,8 @@ // CHECK-SAME: [[#VBITS]] // CHECK-SAME: EES_( noundef %x.coerce, noundef %y.coerce) // CHECK-NEXT: entry: -// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE1:%.*]], i64 0) +// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32.i64( [[X_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32.i64( [[X_COERCE1:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <[[#div(VBITS, 32)]] x i32> [[Y]], [[X]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32( undef, <[[#div(VBITS, 32)]] x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -68,11 +68,11 @@ // CHECK-SAME: [[#VBITS]] // CHECK-SAME: EE( noundef %x.coerce) // CHECK-NEXT: entry: -// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[X_COERCE:%.*]], i64 0) +// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16.i64( [[X_COERCE:%.*]], i64 0) // CHECK128-NEXT: call void @_Z1fDv8_s(<8 x i16> noundef [[X]]) [[ATTR5:#.*]] // CHECK128-NEXT: ret void // CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16 -// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS, 16)]] x i16> @llvm.experimental.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16( [[X_COERCE:%.*]], i64 0) +// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS, 16)]] x i16> @llvm.experimental.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16.i64( [[X_COERCE:%.*]], i64 0) // CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], <[[#div(VBITS, 16)]] x i16>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]] // CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(<[[#div(VBITS, 16)]] x i16>* noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]] // CHECKWIDE-NEXT: ret void diff --git a/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c b/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c --- a/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c @@ -29,8 +29,8 @@ // CHECK-LABEL: @add_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], 
i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -41,8 +41,8 @@ // CHECK-LABEL: @add_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -53,8 +53,8 @@ // CHECK-LABEL: @add_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -65,8 +65,8 @@ // CHECK-LABEL: @add_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -77,8 +77,8 @@ // CHECK-LABEL: @add_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -89,8 +89,8 @@ // CHECK-LABEL: @add_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> 
@llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -101,8 +101,8 @@ // CHECK-LABEL: @add_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -113,8 +113,8 @@ // CHECK-LABEL: @add_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -125,8 +125,8 @@ // CHECK-LABEL: @add_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[ADD:%.*]] = fadd <32 x float> [[CONV]], [[CONV2]] @@ -140,8 +140,8 @@ // CHECK-LABEL: @add_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: 
[[ADD:%.*]] = fadd <16 x float> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -152,8 +152,8 @@ // CHECK-LABEL: @add_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -164,8 +164,8 @@ // CHECK-LABEL: @add_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -176,8 +176,8 @@ // CHECK-LABEL: @add_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -188,8 +188,8 @@ // CHECK-LABEL: @add_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -200,8 +200,8 @@ // CHECK-LABEL: @add_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: 
[[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -212,8 +212,8 @@ // CHECK-LABEL: @add_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -224,8 +224,8 @@ // CHECK-LABEL: @add_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -236,8 +236,8 @@ // CHECK-LABEL: @add_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -248,8 +248,8 @@ // CHECK-LABEL: @add_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] 
= call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -260,8 +260,8 @@ // CHECK-LABEL: @add_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[ADD:%.*]] = fadd <32 x float> [[CONV2]], [[CONV]] @@ -275,8 +275,8 @@ // CHECK-LABEL: @add_inplace_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = fadd <16 x float> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -287,8 +287,8 @@ // CHECK-LABEL: @add_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -299,7 +299,7 @@ // CHECK-LABEL: @add_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -312,7 +312,7 @@ // CHECK-LABEL: @add_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = 
shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -325,7 +325,7 @@ // CHECK-LABEL: @add_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -338,7 +338,7 @@ // CHECK-LABEL: @add_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -351,7 +351,7 @@ // CHECK-LABEL: @add_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -364,7 +364,7 @@ // CHECK-LABEL: @add_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -377,7 +377,7 @@ // CHECK-LABEL: @add_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -390,7 +390,7 @@ // CHECK-LABEL: @add_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: 
[[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -403,7 +403,7 @@ // CHECK-LABEL: @add_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = fadd <32 x half> [[A]], [[SPLAT_SPLAT]] @@ -416,7 +416,7 @@ // CHECK-LABEL: @add_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = fadd <16 x float> [[A]], [[SPLAT_SPLAT]] @@ -429,7 +429,7 @@ // CHECK-LABEL: @add_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[SPLAT_SPLAT]] @@ -444,8 +444,8 @@ // CHECK-LABEL: @sub_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -456,8 +456,8 @@ // CHECK-LABEL: @sub_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] // CHECK-NEXT: 
[[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -468,8 +468,8 @@ // CHECK-LABEL: @sub_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -480,8 +480,8 @@ // CHECK-LABEL: @sub_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -492,8 +492,8 @@ // CHECK-LABEL: @sub_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -504,8 +504,8 @@ // CHECK-LABEL: @sub_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -516,8 +516,8 @@ // CHECK-LABEL: @sub_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// 
CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -528,8 +528,8 @@ // CHECK-LABEL: @sub_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -540,8 +540,8 @@ // CHECK-LABEL: @sub_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[SUB:%.*]] = fsub <32 x float> [[CONV]], [[CONV2]] @@ -555,8 +555,8 @@ // CHECK-LABEL: @sub_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <16 x float> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -567,8 +567,8 @@ // CHECK-LABEL: @sub_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <8 x double> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) // CHECK-NEXT: ret 
[[CASTSCALABLESVE]] @@ -579,8 +579,8 @@ // CHECK-LABEL: @sub_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -591,8 +591,8 @@ // CHECK-LABEL: @sub_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -603,8 +603,8 @@ // CHECK-LABEL: @sub_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -615,8 +615,8 @@ // CHECK-LABEL: @sub_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -627,8 +627,8 @@ // CHECK-LABEL: @sub_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( 
[[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -639,8 +639,8 @@ // CHECK-LABEL: @sub_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -651,8 +651,8 @@ // CHECK-LABEL: @sub_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -663,8 +663,8 @@ // CHECK-LABEL: @sub_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -675,8 +675,8 @@ // CHECK-LABEL: @sub_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[SUB:%.*]] = fsub <32 x float> [[CONV]], [[CONV2]] @@ -690,8 +690,8 @@ // CHECK-LABEL: @sub_inplace_f32( // CHECK-NEXT: 
entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <16 x float> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -702,8 +702,8 @@ // CHECK-LABEL: @sub_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <8 x double> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -714,7 +714,7 @@ // CHECK-LABEL: @sub_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -727,7 +727,7 @@ // CHECK-LABEL: @sub_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -740,7 +740,7 @@ // CHECK-LABEL: @sub_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -753,7 +753,7 @@ // CHECK-LABEL: @sub_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// 
CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -766,7 +766,7 @@ // CHECK-LABEL: @sub_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -779,7 +779,7 @@ // CHECK-LABEL: @sub_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -792,7 +792,7 @@ // CHECK-LABEL: @sub_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -805,7 +805,7 @@ // CHECK-LABEL: @sub_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -818,7 +818,7 @@ // CHECK-LABEL: @sub_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = fsub <32 x half> [[A]], [[SPLAT_SPLAT]] @@ -831,7 +831,7 @@ // CHECK-LABEL: @sub_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: 
[[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = fsub <16 x float> [[A]], [[SPLAT_SPLAT]] @@ -844,7 +844,7 @@ // CHECK-LABEL: @sub_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = fsub <8 x double> [[A]], [[SPLAT_SPLAT]] @@ -859,8 +859,8 @@ // CHECK-LABEL: @mul_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -871,8 +871,8 @@ // CHECK-LABEL: @mul_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -883,8 +883,8 @@ // CHECK-LABEL: @mul_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -895,8 +895,8 @@ // CHECK-LABEL: @mul_i64( // CHECK-NEXT: entry: -// 
CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -907,8 +907,8 @@ // CHECK-LABEL: @mul_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -919,8 +919,8 @@ // CHECK-LABEL: @mul_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -931,8 +931,8 @@ // CHECK-LABEL: @mul_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -943,8 +943,8 @@ // CHECK-LABEL: @mul_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 
0) // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -955,8 +955,8 @@ // CHECK-LABEL: @mul_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[MUL:%.*]] = fmul <32 x float> [[CONV]], [[CONV2]] @@ -970,8 +970,8 @@ // CHECK-LABEL: @mul_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = fmul <16 x float> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -982,8 +982,8 @@ // CHECK-LABEL: @mul_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = fmul <8 x double> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -994,8 +994,8 @@ // CHECK-LABEL: @mul_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1006,8 +1006,8 @@ // CHECK-LABEL: @mul_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: 
[[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1018,8 +1018,8 @@ // CHECK-LABEL: @mul_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1030,8 +1030,8 @@ // CHECK-LABEL: @mul_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1042,8 +1042,8 @@ // CHECK-LABEL: @mul_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1054,8 +1054,8 @@ // CHECK-LABEL: @mul_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] // CHECK-NEXT: 
[[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1066,8 +1066,8 @@ // CHECK-LABEL: @mul_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1078,8 +1078,8 @@ // CHECK-LABEL: @mul_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1090,8 +1090,8 @@ // CHECK-LABEL: @mul_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[MUL:%.*]] = fmul <32 x float> [[CONV]], [[CONV2]] @@ -1105,8 +1105,8 @@ // CHECK-LABEL: @mul_inplace_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = fmul <16 x float> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1117,8 +1117,8 @@ // CHECK-LABEL: @mul_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> 
@llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = fmul <8 x double> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1129,7 +1129,7 @@ // CHECK-LABEL: @mul_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -1142,7 +1142,7 @@ // CHECK-LABEL: @mul_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -1155,7 +1155,7 @@ // CHECK-LABEL: @mul_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -1168,7 +1168,7 @@ // CHECK-LABEL: @mul_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -1181,7 +1181,7 @@ // CHECK-LABEL: @mul_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = 
mul <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -1194,7 +1194,7 @@ // CHECK-LABEL: @mul_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -1207,7 +1207,7 @@ // CHECK-LABEL: @mul_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -1220,7 +1220,7 @@ // CHECK-LABEL: @mul_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -1233,7 +1233,7 @@ // CHECK-LABEL: @mul_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = fmul <32 x half> [[A]], [[SPLAT_SPLAT]] @@ -1246,7 +1246,7 @@ // CHECK-LABEL: @mul_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = fmul <16 x float> [[A]], [[SPLAT_SPLAT]] @@ -1259,7 +1259,7 @@ // CHECK-LABEL: @mul_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // 
CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = fmul <8 x double> [[A]], [[SPLAT_SPLAT]] @@ -1274,8 +1274,8 @@ // CHECK-LABEL: @div_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1286,8 +1286,8 @@ // CHECK-LABEL: @div_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1298,8 +1298,8 @@ // CHECK-LABEL: @div_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1310,8 +1310,8 @@ // CHECK-LABEL: @div_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1322,8 +1322,8 @@ // CHECK-LABEL: @div_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1334,8 +1334,8 @@ // CHECK-LABEL: @div_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1346,8 +1346,8 @@ // CHECK-LABEL: @div_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1358,8 +1358,8 @@ // CHECK-LABEL: @div_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1370,8 +1370,8 @@ // CHECK-LABEL: @div_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to 
<32 x float> // CHECK-NEXT: [[DIV:%.*]] = fdiv <32 x float> [[CONV]], [[CONV2]] @@ -1385,8 +1385,8 @@ // CHECK-LABEL: @div_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <16 x float> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1397,8 +1397,8 @@ // CHECK-LABEL: @div_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x double> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1409,8 +1409,8 @@ // CHECK-LABEL: @div_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1421,8 +1421,8 @@ // CHECK-LABEL: @div_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1433,8 +1433,8 @@ // CHECK-LABEL: @div_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: 
[[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1445,8 +1445,8 @@ // CHECK-LABEL: @div_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1457,8 +1457,8 @@ // CHECK-LABEL: @div_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1469,8 +1469,8 @@ // CHECK-LABEL: @div_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1481,8 +1481,8 @@ // CHECK-LABEL: @div_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // 
CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1493,8 +1493,8 @@ // CHECK-LABEL: @div_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1505,8 +1505,8 @@ // CHECK-LABEL: @div_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[DIV:%.*]] = fdiv <32 x float> [[CONV]], [[CONV2]] @@ -1520,8 +1520,8 @@ // CHECK-LABEL: @div_inplace_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <16 x float> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1532,8 +1532,8 @@ // CHECK-LABEL: @div_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x double> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1544,7 +1544,7 @@ // CHECK-LABEL: @div_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement 
<64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -1557,7 +1557,7 @@ // CHECK-LABEL: @div_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -1570,7 +1570,7 @@ // CHECK-LABEL: @div_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -1583,7 +1583,7 @@ // CHECK-LABEL: @div_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -1596,7 +1596,7 @@ // CHECK-LABEL: @div_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -1609,7 +1609,7 @@ // CHECK-LABEL: @div_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -1622,7 +1622,7 @@ // CHECK-LABEL: @div_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> 
@llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -1635,7 +1635,7 @@ // CHECK-LABEL: @div_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -1648,7 +1648,7 @@ // CHECK-LABEL: @div_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = fdiv <32 x half> [[A]], [[SPLAT_SPLAT]] @@ -1661,7 +1661,7 @@ // CHECK-LABEL: @div_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = fdiv <16 x float> [[A]], [[SPLAT_SPLAT]] @@ -1674,7 +1674,7 @@ // CHECK-LABEL: @div_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x double> [[A]], [[SPLAT_SPLAT]] @@ -1689,8 +1689,8 @@ // CHECK-LABEL: @rem_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( 
undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1701,8 +1701,8 @@ // CHECK-LABEL: @rem_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1713,8 +1713,8 @@ // CHECK-LABEL: @rem_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1725,8 +1725,8 @@ // CHECK-LABEL: @rem_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1737,8 +1737,8 @@ // CHECK-LABEL: @rem_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1749,8 +1749,8 @@ // CHECK-LABEL: @rem_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> 
@llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1761,8 +1761,8 @@ // CHECK-LABEL: @rem_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1773,8 +1773,8 @@ // CHECK-LABEL: @rem_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1785,8 +1785,8 @@ // CHECK-LABEL: @rem_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1797,8 +1797,8 @@ // CHECK-LABEL: @rem_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1809,8 
+1809,8 @@ // CHECK-LABEL: @rem_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1821,8 +1821,8 @@ // CHECK-LABEL: @rem_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1833,8 +1833,8 @@ // CHECK-LABEL: @rem_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1845,8 +1845,8 @@ // CHECK-LABEL: @rem_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1857,8 +1857,8 @@ // CHECK-LABEL: @rem_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 
0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1869,8 +1869,8 @@ // CHECK-LABEL: @rem_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -1881,7 +1881,7 @@ // CHECK-LABEL: @rem_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = srem <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -1894,7 +1894,7 @@ // CHECK-LABEL: @rem_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = srem <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -1907,7 +1907,7 @@ // CHECK-LABEL: @rem_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = srem <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -1920,7 +1920,7 @@ // CHECK-LABEL: @rem_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = srem <8 x i64> [[A]], 
[[SPLAT_SPLAT]] @@ -1933,7 +1933,7 @@ // CHECK-LABEL: @rem_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = urem <64 x i8> [[A]], [[SPLAT_SPLAT]] @@ -1946,7 +1946,7 @@ // CHECK-LABEL: @rem_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = urem <32 x i16> [[A]], [[SPLAT_SPLAT]] @@ -1959,7 +1959,7 @@ // CHECK-LABEL: @rem_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = urem <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -1972,7 +1972,7 @@ // CHECK-LABEL: @rem_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = urem <8 x i64> [[A]], [[SPLAT_SPLAT]] diff --git a/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c b/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c --- a/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c @@ -30,9 +30,9 @@ // CHECK-LABEL: @and_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[B_COERCE]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[AND]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to @@ 
-44,8 +44,8 @@ // CHECK-LABEL: @and_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -56,8 +56,8 @@ // CHECK-LABEL: @and_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -68,8 +68,8 @@ // CHECK-LABEL: @and_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -80,8 +80,8 @@ // CHECK-LABEL: @and_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -92,8 +92,8 @@ // CHECK-LABEL: @and_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -104,8 +104,8 @@ // CHECK-LABEL: @and_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -116,8 +116,8 @@ // CHECK-LABEL: @and_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -128,8 +128,8 @@ // CHECK-LABEL: @and_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -143,9 +143,9 @@ // CHECK-LABEL: @or_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[B_COERCE]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[OR]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to @@ -157,8 +157,8 @@ // CHECK-LABEL: @or_i8( // CHECK-NEXT: entry: 
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -169,8 +169,8 @@ // CHECK-LABEL: @or_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -181,8 +181,8 @@ // CHECK-LABEL: @or_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -193,8 +193,8 @@ // CHECK-LABEL: @or_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -205,8 +205,8 @@ // CHECK-LABEL: @or_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // 
CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -217,8 +217,8 @@ // CHECK-LABEL: @or_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -229,8 +229,8 @@ // CHECK-LABEL: @or_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -241,8 +241,8 @@ // CHECK-LABEL: @or_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -256,9 +256,9 @@ // CHECK-LABEL: @xor_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[B_COERCE]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[XOR]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to @@ -270,8 +270,8 @@ // CHECK-LABEL: @xor_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -282,8 +282,8 @@
// CHECK-LABEL: @xor_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -294,8 +294,8 @@
// CHECK-LABEL: @xor_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -306,8 +306,8 @@
// CHECK-LABEL: @xor_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -318,8 +318,8 @@
// CHECK-LABEL: @xor_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -330,8 +330,8 @@
// CHECK-LABEL: @xor_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -342,8 +342,8 @@
// CHECK-LABEL: @xor_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -354,8 +354,8 @@
// CHECK-LABEL: @xor_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -369,7 +369,7 @@
// CHECK-LABEL: @neg_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i8> [[A]],
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[NEG]], i64 0)
// CHECK-NEXT: [[TMP1:%.*]] = bitcast [[CASTSCALABLESVE]] to
@@ -381,7 +381,7 @@
// CHECK-LABEL: @neg_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]],
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -392,7 +392,7 @@
// CHECK-LABEL: @neg_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]],
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -403,7 +403,7 @@
// CHECK-LABEL: @neg_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]],
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -414,7 +414,7 @@
// CHECK-LABEL: @neg_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <8 x i64> [[A]],
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -425,7 +425,7 @@
// CHECK-LABEL: @neg_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]],
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -436,7 +436,7 @@
// CHECK-LABEL: @neg_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]],
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -447,7 +447,7 @@
// CHECK-LABEL: @neg_u32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]],
// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
@@ -458,7 +458,7 @@
// CHECK-LABEL: @neg_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[NEG:%.*]] =
xor <8 x i64> [[A]], // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] diff --git a/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c b/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c --- a/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c @@ -30,9 +30,9 @@ // CHECK-LABEL: @eq_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) @@ -45,8 +45,8 @@ // CHECK-LABEL: @eq_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -58,8 +58,8 @@ // CHECK-LABEL: @eq_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -71,8 +71,8 @@ // CHECK-LABEL: @eq_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to 
<16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -84,8 +84,8 @@ // CHECK-LABEL: @eq_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -97,8 +97,8 @@ // CHECK-LABEL: @eq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -110,8 +110,8 @@ // CHECK-LABEL: @eq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -123,8 +123,8 @@ // CHECK-LABEL: @eq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -136,8 +136,8 @@ // CHECK-LABEL: @eq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: 
[[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -149,8 +149,8 @@ // CHECK-LABEL: @eq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <32 x float> [[CONV]], [[CONV2]] @@ -165,8 +165,8 @@ // CHECK-LABEL: @eq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -178,8 +178,8 @@ // CHECK-LABEL: @eq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -194,9 +194,9 @@ // CHECK-LABEL: @neq_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x 
i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) @@ -209,8 +209,8 @@ // CHECK-LABEL: @neq_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -222,8 +222,8 @@ // CHECK-LABEL: @neq_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -235,8 +235,8 @@ // CHECK-LABEL: @neq_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -248,8 +248,8 @@ // CHECK-LABEL: @neq_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -261,8 +261,8 
@@ // CHECK-LABEL: @neq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -274,8 +274,8 @@ // CHECK-LABEL: @neq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -287,8 +287,8 @@ // CHECK-LABEL: @neq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -300,8 +300,8 @@ // CHECK-LABEL: @neq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -313,8 +313,8 @@ // CHECK-LABEL: @neq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> 
@llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp une <32 x float> [[CONV]], [[CONV2]] @@ -329,8 +329,8 @@ // CHECK-LABEL: @neq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp une <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -342,8 +342,8 @@ // CHECK-LABEL: @neq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp une <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -358,9 +358,9 @@ // CHECK-LABEL: @lt_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) @@ -373,8 +373,8 @@ // CHECK-LABEL: @lt_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp slt <64 x i8> [[A]], [[B]] // CHECK-NEXT: 
[[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -386,8 +386,8 @@ // CHECK-LABEL: @lt_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp slt <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -399,8 +399,8 @@ // CHECK-LABEL: @lt_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp slt <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -412,8 +412,8 @@ // CHECK-LABEL: @lt_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp slt <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -425,8 +425,8 @@ // CHECK-LABEL: @lt_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -438,8 +438,8 @@ // CHECK-LABEL: @lt_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( 
[[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -451,8 +451,8 @@ // CHECK-LABEL: @lt_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -464,8 +464,8 @@ // CHECK-LABEL: @lt_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -477,8 +477,8 @@ // CHECK-LABEL: @lt_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <32 x float> [[CONV]], [[CONV2]] @@ -493,8 +493,8 @@ // CHECK-LABEL: @lt_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> 
@llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -506,8 +506,8 @@ // CHECK-LABEL: @lt_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -522,9 +522,9 @@ // CHECK-LABEL: @leq_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ule <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) @@ -537,8 +537,8 @@ // CHECK-LABEL: @leq_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sle <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -550,8 +550,8 @@ // CHECK-LABEL: @leq_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sle <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = 
call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -563,8 +563,8 @@ // CHECK-LABEL: @leq_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sle <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -576,8 +576,8 @@ // CHECK-LABEL: @leq_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sle <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -589,8 +589,8 @@ // CHECK-LABEL: @leq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ule <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -602,8 +602,8 @@ // CHECK-LABEL: @leq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ule <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -615,8 +615,8 @@ // CHECK-LABEL: @leq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> 
@llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ule <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -628,8 +628,8 @@ // CHECK-LABEL: @leq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ule <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -641,8 +641,8 @@ // CHECK-LABEL: @leq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp ole <32 x float> [[CONV]], [[CONV2]] @@ -657,8 +657,8 @@ // CHECK-LABEL: @leq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp ole <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -670,8 +670,8 @@ // CHECK-LABEL: @leq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = 
fcmp ole <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -686,9 +686,9 @@ // CHECK-LABEL: @gt_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) @@ -701,8 +701,8 @@ // CHECK-LABEL: @gt_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sgt <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -714,8 +714,8 @@ // CHECK-LABEL: @gt_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sgt <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -727,8 +727,8 @@ // CHECK-LABEL: @gt_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sgt <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -740,8 +740,8 @@ // 
CHECK-LABEL: @gt_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sgt <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -753,8 +753,8 @@ // CHECK-LABEL: @gt_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -766,8 +766,8 @@ // CHECK-LABEL: @gt_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -779,8 +779,8 @@ // CHECK-LABEL: @gt_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -792,8 +792,8 @@ // CHECK-LABEL: @gt_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> 
@llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -805,8 +805,8 @@ // CHECK-LABEL: @gt_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <32 x float> [[CONV]], [[CONV2]] @@ -821,8 +821,8 @@ // CHECK-LABEL: @gt_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -834,8 +834,8 @@ // CHECK-LABEL: @gt_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -850,9 +850,9 @@ // CHECK-LABEL: @geq_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <8 x i8> [[A]], [[B]] // 
CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) @@ -865,8 +865,8 @@ // CHECK-LABEL: @geq_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -878,8 +878,8 @@ // CHECK-LABEL: @geq_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -891,8 +891,8 @@ // CHECK-LABEL: @geq_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -904,8 +904,8 @@ // CHECK-LABEL: @geq_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -917,8 +917,8 @@ // CHECK-LABEL: @geq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) @@ -930,8 +930,8 @@ // CHECK-LABEL: @geq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) @@ -943,8 +943,8 @@ // CHECK-LABEL: @geq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -956,8 +956,8 @@ // CHECK-LABEL: @geq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) @@ -969,8 +969,8 @@ // CHECK-LABEL: @geq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) +// 
CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <32 x float> [[CONV]], [[CONV2]] @@ -985,8 +985,8 @@ // CHECK-LABEL: @geq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) @@ -998,8 +998,8 @@ // CHECK-LABEL: @geq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) diff --git a/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c b/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c --- a/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c @@ -27,8 +27,8 @@ // CHECK-LABEL: @lshift_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -39,8 +39,8 @@ // CHECK-LABEL: @rshift_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[A]], [[B]] // 
CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -51,8 +51,8 @@ // CHECK-LABEL: @lshift_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -63,8 +63,8 @@ // CHECK-LABEL: @rshift_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -75,8 +75,8 @@ // CHECK-LABEL: @lshift_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -87,8 +87,8 @@ // CHECK-LABEL: @rshift_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -99,8 +99,8 @@ // CHECK-LABEL: @lshift_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], 
i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -111,8 +111,8 @@ // CHECK-LABEL: @rshift_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -123,8 +123,8 @@ // CHECK-LABEL: @lshift_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -135,8 +135,8 @@ // CHECK-LABEL: @rshift_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -147,8 +147,8 @@ // CHECK-LABEL: @lshift_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], 
i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -159,8 +159,8 @@ // CHECK-LABEL: @rshift_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -171,8 +171,8 @@ // CHECK-LABEL: @lshift_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -183,8 +183,8 @@ // CHECK-LABEL: @rshift_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -195,8 +195,8 @@ // CHECK-LABEL: @lshift_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -207,8 +207,8 @@ // CHECK-LABEL: @rshift_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], 
i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] @@ -219,7 +219,7 @@ // CHECK-LABEL: @lshift_i8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer @@ -234,7 +234,7 @@ // CHECK-LABEL: @lshift_i8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]] @@ -247,7 +247,7 @@ // CHECK-LABEL: @rshift_i8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer @@ -262,7 +262,7 @@ // CHECK-LABEL: @rshift_i8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[SPLAT_SPLAT]], [[A]] @@ -275,7 +275,7 @@ // CHECK-LABEL: @lshift_u8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer @@ -290,7 +290,7 @@ // CHECK-LABEL: @lshift_u8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// 
CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]] @@ -303,7 +303,7 @@ // CHECK-LABEL: @rshift_u8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer @@ -318,7 +318,7 @@ // CHECK-LABEL: @rshift_u8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[SPLAT_SPLAT]], [[A]] @@ -331,7 +331,7 @@ // CHECK-LABEL: @lshift_i16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer @@ -346,7 +346,7 @@ // CHECK-LABEL: @lshift_i16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]] @@ -359,7 +359,7 @@ // CHECK-LABEL: @rshift_i16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer @@ -374,7 +374,7 @@ // CHECK-LABEL: @rshift_i16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> 
@llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[SPLAT_SPLAT]], [[A]] @@ -387,7 +387,7 @@ // CHECK-LABEL: @lshift_u16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer @@ -402,7 +402,7 @@ // CHECK-LABEL: @lshift_u16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]] @@ -415,7 +415,7 @@ // CHECK-LABEL: @rshift_u16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer @@ -430,7 +430,7 @@ // CHECK-LABEL: @rshift_u16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[SPLAT_SPLAT]], [[A]] @@ -443,7 +443,7 @@ // CHECK-LABEL: @lshift_i32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ 
-456,7 +456,7 @@ // CHECK-LABEL: @lshift_i32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[SPLAT_SPLAT]], [[A]] @@ -469,7 +469,7 @@ // CHECK-LABEL: @rshift_i32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -482,7 +482,7 @@ // CHECK-LABEL: @rshift_i32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[SPLAT_SPLAT]], [[A]] @@ -495,7 +495,7 @@ // CHECK-LABEL: @lshift_u32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -508,7 +508,7 @@ // CHECK-LABEL: @lshift_u32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[SPLAT_SPLAT]], [[A]] @@ -521,7 +521,7 @@ // CHECK-LABEL: @rshift_u32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> 
[[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[SPLAT_SPLAT]] @@ -534,7 +534,7 @@ // CHECK-LABEL: @rshift_u32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[SPLAT_SPLAT]], [[A]] @@ -547,7 +547,7 @@ // CHECK-LABEL: @lshift_i64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -560,7 +560,7 @@ // CHECK-LABEL: @lshift_i64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]] @@ -573,7 +573,7 @@ // CHECK-LABEL: @rshift_i64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -586,7 +586,7 @@ // CHECK-LABEL: @rshift_i64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[SPLAT_SPLAT]], [[A]] @@ -599,7 +599,7 @@ // CHECK-LABEL: @lshift_u64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 
x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -612,7 +612,7 @@ // CHECK-LABEL: @lshift_u64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]] @@ -625,7 +625,7 @@ // CHECK-LABEL: @rshift_u64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[SPLAT_SPLAT]] @@ -638,7 +638,7 @@ // CHECK-LABEL: @rshift_u64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[SPLAT_SPLAT]], [[A]] diff --git a/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c b/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c --- a/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c @@ -28,7 +28,7 @@ // CHECK-LABEL: @subscript_int16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i16 [[VECEXT]] // @@ -38,7 +38,7 @@ // CHECK-LABEL: @subscript_uint16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i16 [[VECEXT]] // @@ -48,7 +48,7 @@ // CHECK-LABEL: @subscript_int32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]] // 
CHECK-NEXT: ret i32 [[VECEXT]] // @@ -58,7 +58,7 @@ // CHECK-LABEL: @subscript_uint32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i32 [[VECEXT]] // @@ -68,7 +68,7 @@ // CHECK-LABEL: @subscript_int64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i64 [[VECEXT]] // @@ -78,7 +78,7 @@ // CHECK-LABEL: @subscript_uint64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i64 [[VECEXT]] // @@ -88,7 +88,7 @@ // CHECK-LABEL: @subscript_float16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x half> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret half [[VECEXT]] // @@ -98,7 +98,7 @@ // CHECK-LABEL: @subscript_float32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x float> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret float [[VECEXT]] // @@ -108,7 +108,7 @@ // CHECK-LABEL: @subscript_float64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x double> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret double [[VECEXT]] // diff --git a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c --- a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c +++ b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c @@ -16,27 +16,26 @@ // CHECK-LABEL: @test_svget_neonq_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <16 x i8> [[TMP0]] // // CPP-CHECK-LABEL: @_Z19test_svget_neonq_s8u10__SVInt8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( 
[[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <16 x i8> [[TMP0]] // int8x16_t test_svget_neonq_s8(svint8_t n) { return SVE_ACLE_FUNC(svget_neonq, _s8, , )(n); } -// // CHECK-LABEL: @test_svget_neonq_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <8 x i16> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_s16u11__SVInt16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <8 x i16> [[TMP0]] // int16x8_t test_svget_neonq_s16(svint16_t n) { @@ -45,12 +44,12 @@ // CHECK-LABEL: @test_svget_neonq_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <4 x i32> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_s32u11__SVInt32_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <4 x i32> [[TMP0]] // int32x4_t test_svget_neonq_s32(svint32_t n) { @@ -59,12 +58,12 @@ // CHECK-LABEL: @test_svget_neonq_s64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <2 x i64> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_s64u11__SVInt64_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <2 x i64> [[TMP0]] // int64x2_t test_svget_neonq_s64(svint64_t n) { @@ -73,12 +72,12 @@ // CHECK-LABEL: @test_svget_neonq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <16 x i8> [[TMP0]] // // CPP-CHECK-LABEL: @_Z19test_svget_neonq_u8u11__SVUint8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <16 x i8> [[TMP0]] // uint8x16_t test_svget_neonq_u8(svuint8_t n) { @@ -87,12 +86,12 @@ // CHECK-LABEL: @test_svget_neonq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] 
= call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <8 x i16> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_u16u12__SVUint16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <8 x i16> [[TMP0]] // uint16x8_t test_svget_neonq_u16(svuint16_t n) { @@ -101,12 +100,12 @@ // CHECK-LABEL: @test_svget_neonq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <4 x i32> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_u32u12__SVUint32_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <4 x i32> [[TMP0]] // uint32x4_t test_svget_neonq_u32(svuint32_t n) { @@ -115,12 +114,12 @@ // CHECK-LABEL: @test_svget_neonq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <2 x i64> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_u64u12__SVUint64_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <2 x i64> [[TMP0]] // uint64x2_t test_svget_neonq_u64(svuint64_t n) { @@ -129,12 +128,12 @@ // CHECK-LABEL: @test_svget_neonq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.experimental.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.experimental.vector.extract.v8f16.nxv8f16.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <8 x half> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_f16u13__SVFloat16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.experimental.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.experimental.vector.extract.v8f16.nxv8f16.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <8 x half> [[TMP0]] // float16x8_t test_svget_neonq_f16(svfloat16_t n) { @@ -143,26 +142,27 @@ // CHECK-LABEL: @test_svget_neonq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <4 x float> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_f32u13__SVFloat32_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32.i64( 
[[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <4 x float> [[TMP0]] // float32x4_t test_svget_neonq_f32(svfloat32_t n) { return SVE_ACLE_FUNC(svget_neonq, _f32, , )(n); } +// // CHECK-LABEL: @test_svget_neonq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <2 x double> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_f64u13__SVFloat64_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <2 x double> [[TMP0]] // float64x2_t test_svget_neonq_f64(svfloat64_t n) { @@ -171,12 +171,12 @@ // CHECK-LABEL: @test_svget_neonq_bf16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16.i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <8 x bfloat> [[TMP0]] // // CPP-CHECK-LABEL: @_Z21test_svget_neonq_bf16u14__SVBFloat16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16.i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <8 x bfloat> [[TMP0]] // bfloat16x8_t test_svget_neonq_bf16(svbfloat16_t n) { diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c @@ -55,21 +55,21 @@ // CHECK-128-LABEL: @write_int64( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[X:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64.i64( [[X:%.*]], i64 0) // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-256-LABEL: @write_int64( // CHECK-256-NEXT: entry: -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[X:%.*]], i64 0) +// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[X:%.*]], i64 0) // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: store <4 x i64> [[CASTFIXEDSVE]], <4 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-256-NEXT: ret void // // CHECK-512-LABEL: @write_int64( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[X:%.*]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[X:%.*]], i64 0) // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void @@ -109,21 +109,21 @@ // CHECK-128-LABEL: @write_float64( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( [[X:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64.i64( [[X:%.*]], i64 0) // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: store <2 x double> [[CASTFIXEDSVE]], <2 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-256-LABEL: @write_float64( // CHECK-256-NEXT: entry: -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[X:%.*]], i64 0) +// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64.i64( [[X:%.*]], i64 0) // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: store <4 x double> [[CASTFIXEDSVE]], <4 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-256-NEXT: ret void // // CHECK-512-LABEL: @write_float64( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[X:%.*]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64.i64( [[X:%.*]], i64 0) // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void @@ -163,21 +163,21 @@ // CHECK-128-LABEL: @write_bfloat16( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[X:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16.i64( [[X:%.*]], i64 0) // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-256-LABEL: @write_bfloat16( // CHECK-256-NEXT: entry: -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16( [[X:%.*]], i64 0) +// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16.i64( [[X:%.*]], i64 0) // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: store <16 x bfloat> [[CASTFIXEDSVE]], <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-256-NEXT: ret void // // CHECK-512-LABEL: @write_bfloat16( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16( [[X:%.*]], i64 0) +// 
CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16.i64( [[X:%.*]], i64 0) // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void @@ -194,24 +194,24 @@ // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0) -// CHECK-128-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to +// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0) +// CHECK-128-NEXT: [[TMP1:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-128-NEXT: ret [[TMP1]] // // CHECK-256-LABEL: @read_bool( // CHECK-256-NEXT: entry: // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v4i8( undef, <4 x i8> [[TMP0]], i64 0) -// CHECK-256-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to +// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v4i8( undef, <4 x i8> [[TMP0]], i64 0) +// CHECK-256-NEXT: [[TMP1:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-256-NEXT: ret [[TMP1]] // // CHECK-512-LABEL: @read_bool( // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0) -// CHECK-512-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to +// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0) +// CHECK-512-NEXT: [[TMP1:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-512-NEXT: ret [[TMP1]] // svbool_t read_bool(struct struct_bool *s) { @@ -220,24 +220,24 @@ // CHECK-128-LABEL: @write_bool( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast %x to -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0) +// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast [[X:%.*]] to +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8.i64( [[TMP0]], i64 0) // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-256-LABEL: @write_bool( // CHECK-256-NEXT: entry: -// CHECK-256-NEXT: [[TMP0:%.*]] = bitcast %x to -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8( [[TMP0]], i64 0) +// 
CHECK-256-NEXT: [[TMP0:%.*]] = bitcast [[X:%.*]] to +// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8.i64( [[TMP0]], i64 0) // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: store <4 x i8> [[CASTFIXEDSVE]], <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] // CHECK-256-NEXT: ret void // // CHECK-512-LABEL: @write_bool( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[TMP0:%.*]] = bitcast %x to -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0) +// CHECK-512-NEXT: [[TMP0:%.*]] = bitcast [[X:%.*]] to +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[TMP0]], i64 0) // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: store <8 x i8> [[CASTFIXEDSVE]], <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c @@ -46,7 +46,7 @@ // CHECK-LABEL: @to_svbool_t( // CHECK-NEXT: entry: -// CHECK-NEXT: ret [[TYPE:%.*]] +// CHECK-NEXT: ret [[TMP0:%.*]] // svbool_t to_svbool_t(fixed_bool_t type) { return type; @@ -62,12 +62,12 @@ // CHECK-LABEL: @lax_cast( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = alloca <16 x i32>, align 64 -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0) -// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[TMP0:%.*]], align 64, !tbaa [[TBAA6:![0-9]+]] -// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[TMP0]] to * -// CHECK-NEXT: [[TMP2:%.*]] = load , * [[TMP1]], align 64, !tbaa [[TBAA6]] -// CHECK-NEXT: ret [[TMP2]] +// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <16 x i32>, align 64 +// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[TYPE_COERCE:%.*]], i64 0) +// CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[SAVED_VALUE]], align 64, !tbaa [[TBAA6:![0-9]+]] +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <16 x i32>* [[SAVED_VALUE]] to * +// CHECK-NEXT: [[TMP0:%.*]] = load , * [[CASTFIXEDSVE]], align 64, !tbaa [[TBAA6]] +// CHECK-NEXT: ret [[TMP0]] // svint64_t lax_cast(fixed_int32_t type) { return type; @@ -75,7 +75,7 @@ // CHECK-LABEL: @to_svint32_t__from_gnu_int32_t( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6:![0-9]+]] +// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]] // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TYPE]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // @@ -85,7 +85,7 @@ // CHECK-LABEL: @from_svint32_t__to_gnu_int32_t( // CHECK-NEXT: entry: -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TYPE:%.*]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[TYPE:%.*]], i64 0) // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, 
!tbaa [[TBAA6]] // CHECK-NEXT: ret void // @@ -105,7 +105,7 @@ // CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[TYPE_COERCE:%.*]], i64 0) // CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]] // CHECK-NEXT: ret void // diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c @@ -24,24 +24,24 @@ // CHECK-NEXT: store [[VEC:%.*]], * [[VEC_ADDR]], align 16 // CHECK-NEXT: [[TMP0:%.*]] = load , * [[PRED_ADDR]], align 2 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2 -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0) -// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTFIXEDSVE]] to +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0) +// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2 -// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP3]], i64 0) -// CHECK-NEXT: [[TMP4:%.*]] = bitcast [[CASTFIXEDSVE2]] to +// CHECK-NEXT: [[CASTSCALABLESVE1:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP3]], i64 0) +// CHECK-NEXT: [[TMP4:%.*]] = bitcast [[CASTSCALABLESVE1]] to // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.and.z.nxv16i1( [[TMP0]], [[TMP2]], [[TMP4]]) // CHECK-NEXT: store [[TMP5]], * [[PG]], align 2 // CHECK-NEXT: [[TMP6:%.*]] = load , * [[PG]], align 2 // CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16 -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP7]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP8:%.*]] = load , * [[VEC_ADDR]], align 16 // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[TMP6]]) -// CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.add.nxv4i32( [[TMP9]], [[CASTSCALABLESVE]], [[TMP8]]) -// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TMP10]], i64 0) -// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16 +// CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.add.nxv4i32( [[TMP9]], [[CASTSCALABLESVE2]], [[TMP8]]) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[TMP10]], i64 0) +// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL]], align 16 // CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16 -// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP11]], i64 0) -// CHECK-NEXT: ret [[CASTSCALABLESVE4]] +// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP11]], i64 0) +// CHECK-NEXT: ret 
[[CASTSCALABLESVE3]] // fixed_int32_t foo(svbool_t pred, svint32_t vec) { svbool_t pg = svand_z(pred, global_pred, global_pred); @@ -96,8 +96,8 @@ // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 2 // CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL]], align 2 // CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, <8 x i8>* [[RETVAL]], align 2 -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP2]], i64 0) -// CHECK-NEXT: [[TMP3:%.*]] = bitcast [[CASTFIXEDSVE]] to +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP2]], i64 0) +// CHECK-NEXT: [[TMP3:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP3]] // fixed_bool_t address_of_array_idx() { @@ -121,26 +121,26 @@ // CHECK-NEXT: store <8 x i8> , <8 x i8>* [[YY]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load , * [[PRED_ADDR]], align 2 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2 -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0) -// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTFIXEDSVE]] to +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0) +// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[XX]], align 8 // CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[YY]], align 8 // CHECK-NEXT: [[ADD:%.*]] = add <8 x i8> [[TMP3]], [[TMP4]] -// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[ADD]], i64 0) -// CHECK-NEXT: [[TMP5:%.*]] = bitcast [[CASTFIXEDSVE2]] to +// CHECK-NEXT: [[CASTSCALABLESVE1:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[TMP5:%.*]] = bitcast [[CASTSCALABLESVE1]] to // CHECK-NEXT: [[TMP6:%.*]] = call @llvm.aarch64.sve.and.z.nxv16i1( [[TMP0]], [[TMP2]], [[TMP5]]) // CHECK-NEXT: store [[TMP6]], * [[PG]], align 2 // CHECK-NEXT: [[TMP7:%.*]] = load , * [[PG]], align 2 // CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16 -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP8]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP8]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = load , * [[VEC_ADDR]], align 16 // CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[TMP7]]) -// CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.add.nxv4i32( [[TMP10]], [[CASTSCALABLESVE]], [[TMP9]]) -// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TMP11]], i64 0) -// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16 +// CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.add.nxv4i32( [[TMP10]], [[CASTSCALABLESVE2]], [[TMP9]]) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( [[TMP11]], i64 0) +// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL]], align 16 // CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16 -// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP12]], i64 0) -// CHECK-NEXT: ret 
[[CASTSCALABLESVE4]] +// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP12]], i64 0) +// CHECK-NEXT: ret [[CASTSCALABLESVE3]] // fixed_int32_t test_cast(svbool_t pred, svint32_t vec) { uint8_vec_t xx = {1, 2, 3, 4}; diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c @@ -22,13 +22,13 @@ // CHECK-128-LABEL: @write_global_i64( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[V:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64.i64( [[V:%.*]], i64 0) // CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]] // CHECK-128-NEXT: ret void // // CHECK-512-LABEL: @write_global_i64( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[V:%.*]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64.i64( [[V:%.*]], i64 0) // CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]] // CHECK-512-NEXT: ret void // @@ -36,13 +36,13 @@ // CHECK-128-LABEL: @write_global_bf16( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[V:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16.i64( [[V:%.*]], i64 0) // CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-512-LABEL: @write_global_bf16( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16( [[V:%.*]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16.i64( [[V:%.*]], i64 0) // CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void // @@ -51,14 +51,14 @@ // CHECK-128-LABEL: @write_global_bool( // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast [[V:%.*]] to -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0) -// CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6:![0-9]+]] +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8.i64( [[TMP0]], i64 0) +// CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-512-LABEL: @write_global_bool( // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast [[V:%.*]] to -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8.i64( [[TMP0]], i64 0) // CHECK-512-NEXT: store <8 x i8> [[CASTFIXEDSVE]], <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]] // CHECK-512-NEXT: 
ret void
//
@@ -99,15 +99,15 @@
// CHECK-128-LABEL: @read_global_bool(
// CHECK-128-NEXT: entry:
// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0)
-// CHECK-128-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to
+// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0)
+// CHECK-128-NEXT: [[TMP1:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-128-NEXT: ret [[TMP1]]
//
// CHECK-512-LABEL: @read_global_bool(
// CHECK-512-NEXT: entry:
// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
-// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0)
-// CHECK-512-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to
+// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0)
+// CHECK-512-NEXT: [[TMP1:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-512-NEXT: ret [[TMP1]]
//
svbool_t read_global_bool() { return global_bool; }
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -918,8 +918,8 @@
  CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
                                const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::experimental_vector_extract,
-                          {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
-                          Name);
+                          {DstType, SrcVec->getType(), Idx->getType()},
+                          {SrcVec, Idx}, nullptr, Name);
  }

  /// Create a call to the experimental.vector.insert intrinsic.
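(For context only, not part of the patch: a minimal hand-written sketch of the IR this builder change produces. Once the index operand is an overloaded type, per the Intrinsics.td change below, its type is appended to the mangled intrinsic name, hence the trailing .i64. The function name @extract_lo is an illustrative assumption, not taken from the patch's tests.)

; Sketch only -- assumed example for illustration.
declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32.i64(<vscale x 4 x i32>, i64)

define <4 x i32> @extract_lo(<vscale x 4 x i32> %v) {
  ; Extract the fixed-width <4 x i32> subvector starting at element 0.
  %lo = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32.i64(<vscale x 4 x i32> %v, i64 0)
  ret <4 x i32> %lo
}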
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1966,7 +1966,7 @@
                                              [IntrNoMem, ImmArg>]>;
 def int_experimental_vector_extract : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                                                            [llvm_anyvector_ty, llvm_i64_ty],
+                                                            [llvm_anyvector_ty, llvm_anyint_ty],
                                                             [IntrNoMem, ImmArg>]>;

//===----------------- Pointer Authentication Intrinsics ------------------===//
diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
--- a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll
@@ -3,21 +3,21 @@
 define void @vector_insert_extract( %v0, %v1, <16 x i32> %v2) {
; CHECK-LABEL: 'vector_insert_extract'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( %v0, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( %v0, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %insert_fixed_into_scalable = call @llvm.experimental.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %v1, i64 0)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( %v1, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
-  %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( %v0, i64 0)
+  %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( %v0, i64 0)
   %insert_fixed_into_scalable = call @llvm.experimental.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0)
-  %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %v1, i64 0)
+  %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( %v1, i64 0)
   %insert_scalable_into_scalable = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0)
   ret void
 }

-declare <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(, i64)
+declare <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64(, i64)
 declare @llvm.experimental.vector.insert.nxv4i32.v16i32(, <16 x i32>, i64)
-declare @llvm.experimental.vector.extract.nxv4i32.nxv16i32(, i64)
+declare @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64(, i64)
 declare @llvm.experimental.vector.insert.nxv16i32.nxv4i32(, , i64)
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll
@@ -36,21 +36,21 @@
 define void @vector_insert_extract( %v0, %v1, <16 x i32> %v2) {
; CHECK-LABEL: 'vector_insert_extract'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction:
%extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( %v0, i64 0) +; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( %v0, i64 0) ; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %insert_fixed_into_scalable = call @llvm.experimental.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( %v1, i64 0) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( %v0, i64 0) + %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64( %v0, i64 0) %insert_fixed_into_scalable = call @llvm.experimental.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) - %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) + %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64( %v1, i64 0) %insert_scalable_into_scalable = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) ret void } -declare <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(, i64) +declare <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32.i64(, i64) declare @llvm.experimental.vector.insert.nxv4i32.v16i32(, <16 x i32>, i64) -declare @llvm.experimental.vector.extract.nxv4i32.nxv16i32(, i64) +declare @llvm.experimental.vector.extract.nxv4i32.nxv16i32.i64(, i64) declare @llvm.experimental.vector.insert.nxv16i32.nxv4i32(, , i64) define void @vector_reverse() { diff --git a/llvm/test/Transforms/InstCombine/canonicalize-vector-extract.ll b/llvm/test/Transforms/InstCombine/canonicalize-vector-extract.ll --- a/llvm/test/Transforms/InstCombine/canonicalize-vector-extract.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-vector-extract.ll @@ -4,12 +4,12 @@ ; llvm.experimental.vector.extract canonicalizes to shufflevector in the fixed case. In the ; scalable case, we lower to the EXTRACT_SUBVECTOR ISD node. 
-declare <10 x i32> @llvm.experimental.vector.extract.v10i32.v8i32(<8 x i32> %vec, i64 %idx) -declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 %idx) -declare <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 %idx) -declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( %vec, i64 %idx) -declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 %idx) -declare <8 x i32> @llvm.experimental.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 %idx) +declare <10 x i32> @llvm.experimental.vector.extract.v10i32.v8i32.i64(<8 x i32> %vec, i64 %idx) +declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32.i64(<8 x i32> %vec, i64 %idx) +declare <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32.i64(<8 x i32> %vec, i64 %idx) +declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32.i64( %vec, i64 %idx) +declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32.i64(<8 x i32> %vec, i64 %idx) +declare <8 x i32> @llvm.experimental.vector.extract.v8i32.v8i32.i64(<8 x i32> %vec, i64 %idx) ; ============================================================================ ; ; Trivial cases @@ -20,7 +20,7 @@ ; CHECK-LABEL: @trivial_nop( ; CHECK-NEXT: ret <8 x i32> [[VEC:%.*]] ; - %1 = call <8 x i32> @llvm.experimental.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 0) + %1 = call <8 x i32> @llvm.experimental.vector.extract.v8i32.v8i32.i64(<8 x i32> %vec, i64 0) ret <8 x i32> %1 } @@ -33,7 +33,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[TMP1]] ; - %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 0) + %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32.i64(<8 x i32> %vec, i64 0) ret <2 x i32> %1 } @@ -42,7 +42,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[TMP1]] ; - %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 2) + %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32.i64(<8 x i32> %vec, i64 2) ret <2 x i32> %1 } @@ -51,7 +51,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[TMP1]] ; - %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 4) + %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32.i64(<8 x i32> %vec, i64 4) ret <2 x i32> %1 } @@ -60,7 +60,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[TMP1]] ; - %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 6) + %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32.i64(<8 x i32> %vec, i64 6) ret <2 x i32> %1 } @@ -69,7 +69,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; - %1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 0) + %1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32.i64(<8 x i32> %vec, i64 0) ret <4 x i32> %1 } @@ -78,7 +78,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; - %1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 4) + %1 = call <4 x i32> 
@llvm.experimental.vector.extract.v4i32.v8i32.i64(<8 x i32> %vec, i64 4) ret <4 x i32> %1 } @@ -87,7 +87,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[TMP1]] ; - %1 = call <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 0) + %1 = call <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32.i64(<8 x i32> %vec, i64 0) ret <3 x i32> %1 } @@ -96,7 +96,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[TMP1]] ; - %1 = call <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 3) + %1 = call <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32.i64(<8 x i32> %vec, i64 3) ret <3 x i32> %1 } @@ -108,9 +108,9 @@ ; EXTRACT_SUBVECTOR ISD node later. define <4 x i32> @scalable_extract( %vec) { ; CHECK-LABEL: @scalable_extract( -; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[VEC:%.*]], i64 0) +; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32.i64( [[VEC:%.*]], i64 0) ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; - %1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( %vec, i64 0) + %1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32.i64( %vec, i64 0) ret <4 x i32> %1 } diff --git a/llvm/test/Transforms/InstSimplify/extract-vector.ll b/llvm/test/Transforms/InstSimplify/extract-vector.ll --- a/llvm/test/Transforms/InstSimplify/extract-vector.ll +++ b/llvm/test/Transforms/InstSimplify/extract-vector.ll @@ -6,21 +6,21 @@ ; CHECK-NEXT: ret <16 x i8> [[X:%.*]] ; %inserted = call @llvm.experimental.vector.insert.nxv32i8.v16i8( undef, <16 x i8> %x, i64 0) - %extracted = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8( %inserted, i64 0) + %extracted = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8.i64( %inserted, i64 0) ret <16 x i8> %extracted } define <8 x i8> @non_redundant_insert_extract_chain(<16 x i8> %x) { ; CHECK-LABEL: @non_redundant_insert_extract_chain( ; CHECK-NEXT: [[INSERTED:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.v16i8( undef, <16 x i8> [[X:%.*]], i64 0) -; CHECK-NEXT: [[EXTRACTED:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8( [[INSERTED]], i64 0) +; CHECK-NEXT: [[EXTRACTED:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8.i64( [[INSERTED]], i64 0) ; CHECK-NEXT: ret <8 x i8> [[EXTRACTED]] ; %inserted = call @llvm.experimental.vector.insert.nxv32i8.v16i8( undef, <16 x i8> %x, i64 0) - %extracted = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8( %inserted, i64 0) + %extracted = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8.i64( %inserted, i64 0) ret <8 x i8> %extracted } -declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(, i64) -declare <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(, i64) +declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8.i64(, i64) +declare <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8.i64(, i64) declare @llvm.experimental.vector.insert.nxv32i8.v16i8(, <16 x i8>, i64) diff --git a/llvm/test/Transforms/InstSimplify/insert-vector.ll b/llvm/test/Transforms/InstSimplify/insert-vector.ll --- a/llvm/test/Transforms/InstSimplify/insert-vector.ll +++ b/llvm/test/Transforms/InstSimplify/insert-vector.ll @@ -5,33 +5,33 @@ ; CHECK-LABEL: @redundant_extract_insert_chain( ; CHECK-NEXT: ret [[X:%.*]] ; - 
%extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8( %x, i64 0) + %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8.i64( %x, i64 0) %inserted = call @llvm.experimental.vector.insert.nxv16i8.v32i8( undef, <32 x i8> %extracted, i64 0) ret %inserted } define @non_redundant_extract_insert_chain_0( %x) { ; CHECK-LABEL: @non_redundant_extract_insert_chain_0( -; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8( [[X:%.*]], i64 0) +; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8.i64( [[X:%.*]], i64 0) ; CHECK-NEXT: [[INSERTED:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v32i8( undef, <32 x i8> [[EXTRACTED]], i64 0) ; CHECK-NEXT: ret [[INSERTED]] ; - %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8( %x, i64 0) + %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8.i64( %x, i64 0) %inserted = call @llvm.experimental.vector.insert.nxv16i8.v32i8( undef, <32 x i8> %extracted, i64 0) ret %inserted } define @non_redundant_extract_insert_chain_1( %x, %y) { ; CHECK-LABEL: @non_redundant_extract_insert_chain_1( -; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8( [[X:%.*]], i64 0) +; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8.i64( [[X:%.*]], i64 0) ; CHECK-NEXT: [[INSERTED:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v32i8( [[Y:%.*]], <32 x i8> [[EXTRACTED]], i64 0) ; CHECK-NEXT: ret [[INSERTED]] ; - %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8( %x, i64 0) + %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8.i64( %x, i64 0) %inserted = call @llvm.experimental.vector.insert.nxv16i8.v32i8( %y, <32 x i8> %extracted, i64 0) ret %inserted } -declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(, i64) -declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(, i64) +declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8.i64(, i64) +declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8.i64(, i64) declare @llvm.experimental.vector.insert.nxv16i8.v32i8(, <32 x i8>, i64) diff --git a/llvm/test/Transforms/InterleavedAccess/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/InterleavedAccess/AArch64/sve-interleaved-accesses.ll --- a/llvm/test/Transforms/InterleavedAccess/AArch64/sve-interleaved-accesses.ll +++ b/llvm/test/Transforms/InterleavedAccess/AArch64/sve-interleaved-accesses.ll @@ -8,9 +8,9 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <32 x i16>* %ptr to i16* ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv8i16( [[PTRUE]], i16* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[EXT1:%.*]] = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv8i16( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv8i16.i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[EXT2:%.*]] = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv8i16( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv8i16.i64( [[TMP3]], i64 0) ; CHECK-NEXT: ret void %interleaved.vec = load <32 x i16>, <32 x i16>* %ptr, align 4 %v0 = shufflevector <32 x i16> %interleaved.vec, <32 x i16> poison, <16 x i32> * %ptr to i32* ; 
CHECK-NEXT: [[LDN:%.*]] = call { , , } @llvm.aarch64.sve.ld3.sret.nxv4i32( [[PTRUE]], i32* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[LDN]], 2 -; CHECK-NEXT: [[EXT1:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32.i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[LDN]], 1 -; CHECK-NEXT: [[EXT2:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[LDN]], 0 -; CHECK-NEXT: [[EXT3:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32( [[TMP4]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32.i64( [[TMP4]], i64 0) ; CHECK-NEXT: ret void %interleaved.vec = load <24 x i32>, <24 x i32>* %ptr, align 4 %v0 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> poison, <8 x i32> @@ -45,13 +45,13 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i64>* %ptr to i64* ; CHECK-NEXT: [[LDN:%.*]] = call { , , , } @llvm.aarch64.sve.ld4.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[LDN]], 3 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[LDN]], 2 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[LDN]], 1 -; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP4]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP4]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[LDN]], 0 -; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP5]], i64 0) +; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP5]], i64 0) ; CHECK-NEXT: ret void %interleaved.vec = load <16 x i64>, <16 x i64>* %ptr, align 4 %v0 = shufflevector <16 x i64> %interleaved.vec, <16 x i64> poison, <4 x i32> @@ -126,10 +126,10 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i32*>* %ptr to i64* ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TOP1:%.*]] = inttoptr <4 x i64> [[EXT1]] to <4 x i32*> ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TOP2:%.*]] = inttoptr <4 x i64> [[EXT2]] to <4 x i32*> ; CHECK-NEXT: ret void 
%interleaved.vec = load <8 x i32*>, <8 x i32*>* %ptr, align 4 @@ -144,13 +144,13 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <12 x i32*>* %ptr to i64* ; CHECK-NEXT: [[LDN:%.*]] = call { , , } @llvm.aarch64.sve.ld3.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[LDN]], 2 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TOP1:%.*]] = inttoptr <4 x i64> [[EXT1]] to <4 x i32*> ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[LDN]], 1 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TOP2:%.*]] = inttoptr <4 x i64> [[EXT2]] to <4 x i32*> ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[LDN]], 0 -; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP4]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP4]], i64 0) ; CHECK-NEXT: [[TOP3:%.*]] = inttoptr <4 x i64> [[EXT3]] to <4 x i32*> ; CHECK-NEXT: ret void %interleaved.vec = load <12 x i32*>, <12 x i32*>* %ptr, align 4 @@ -166,16 +166,16 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32*>* %ptr to i64* ; CHECK-NEXT: [[LDN:%.*]] = call { , , , } @llvm.aarch64.sve.ld4.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[LDN]], 3 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TOP1:%.*]] = inttoptr <4 x i64> [[EXT1]] to <4 x i32*> ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[LDN]], 2 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TOP2:%.*]] = inttoptr <4 x i64> [[EXT2]] to <4 x i32*> ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[LDN]], 1 -; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP4]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP4]], i64 0) ; CHECK-NEXT: [[TOP3:%.*]] = inttoptr <4 x i64> [[EXT3]] to <4 x i32*> ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[LDN]], 0 -; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP5]], i64 0) +; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP5]], i64 0) ; CHECK-NEXT: [[TOP4:%.*]] = inttoptr <4 x i64> [[EXT4]] to <4 x i32*> ; CHECK-NEXT: ret void %interleaved.vec = load <16 x i32*>, <16 x i32*>* %ptr, align 4 @@ -254,15 +254,15 @@ ; CHECK-NEXT: [[PTRUE:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> 
@llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[TMP1]], i32 8 ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv2i64( [[PTRUE]], i64* [[TMP4]]) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP5]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP5]], i64 0) ; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP6]], i64 0) +; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64.i64( [[TMP6]], i64 0) ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i64> [[EXT1]], <4 x i64> [[EXT3]], <8 x i32> ; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i64> [[EXT2]], <4 x i64> [[EXT4]], <8 x i32> ; CHECK-NEXT: ret void @@ -357,13 +357,13 @@ ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x double>* [[PTR:%.*]] to double* ; CHECK-NEXT: [[LDN:%.*]] = call { , , , } @llvm.aarch64.sve.ld4.sret.nxv2f64( [[TMP1]], double* [[TMP2]]) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[LDN]], 3 -; CHECK-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[TMP3]], i64 0) +; CHECK-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[LDN]], 2 -; CHECK-NEXT: [[TMP6:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[TMP5]], i64 0) +; CHECK-NEXT: [[TMP6:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64.i64( [[TMP5]], i64 0) ; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[LDN]], 1 -; CHECK-NEXT: [[TMP8:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[TMP7]], i64 0) +; CHECK-NEXT: [[TMP8:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64.i64( [[TMP7]], i64 0) ; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { , , , } [[LDN]], 0 -; CHECK-NEXT: [[TMP10:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[TMP9]], i64 0) +; CHECK-NEXT: [[TMP10:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64.i64( [[TMP9]], i64 0) ; CHECK-NEXT: ret void ; %interleaved.vec = load <16 x double>, <16 x double>* %ptr, align 4 @@ -380,11 +380,11 @@ ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <24 x float>* [[PTR:%.*]] to float* ; CHECK-NEXT: [[LDN:%.*]] = call { , , } @llvm.aarch64.sve.ld3.sret.nxv4f32( [[TMP1]], float* [[TMP2]]) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[LDN]], 2 -; CHECK-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32( [[TMP3]], i64 0) +; CHECK-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[LDN]], 1 -; CHECK-NEXT: [[TMP6:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32( [[TMP5]], i64 0) +; CHECK-NEXT: [[TMP6:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32.i64( 
[[TMP5]], i64 0) ; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { , , } [[LDN]], 0 -; CHECK-NEXT: [[TMP8:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32( [[TMP7]], i64 0) +; CHECK-NEXT: [[TMP8:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32.i64( [[TMP7]], i64 0) ; CHECK-NEXT: ret void ; %interleaved.vec = load <24 x float>, <24 x float>* %ptr, align 4 @@ -400,9 +400,9 @@ ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <32 x half>* [[PTR:%.*]] to half* ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv8f16( [[TMP1]], half* [[TMP2]]) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[TMP4:%.*]] = call <16 x half> @llvm.experimental.vector.extract.v16f16.nxv8f16( [[TMP3]], i64 0) +; CHECK-NEXT: [[TMP4:%.*]] = call <16 x half> @llvm.experimental.vector.extract.v16f16.nxv8f16.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[TMP6:%.*]] = call <16 x half> @llvm.experimental.vector.extract.v16f16.nxv8f16( [[TMP5]], i64 0) +; CHECK-NEXT: [[TMP6:%.*]] = call <16 x half> @llvm.experimental.vector.extract.v16f16.nxv8f16.i64( [[TMP5]], i64 0) ; CHECK-NEXT: ret void ; %interleaved.vec = load <32 x half>, <32 x half>* %ptr, align 4 @@ -417,9 +417,9 @@ ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <32 x bfloat>* [[PTR:%.*]] to bfloat* ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv8bf16( [[TMP1]], bfloat* [[TMP2]]) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[TMP4:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16( [[TMP3]], i64 0) +; CHECK-NEXT: [[TMP4:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16.i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[TMP6:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16( [[TMP5]], i64 0) +; CHECK-NEXT: [[TMP6:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16.i64( [[TMP5]], i64 0) ; CHECK-NEXT: ret void ; %interleaved.vec = load <32 x bfloat>, <32 x bfloat>* %ptr, align 4