diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2196,7 +2196,7 @@
 // C/C++ Operand: VecTy, IR Operand: VecTy, Index
 let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc",
     MaskedPolicy = NonePolicy, ManualCodegen = [{ {
-      ID = Intrinsic::experimental_vector_extract;
+      ID = Intrinsic::vector_extract;
       IntrinsicTypes = {ResultType, Ops[0]->getType()};
       Ops.push_back(ConstantInt::get(Int64Ty, 0));
       return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
@@ -2214,7 +2214,7 @@
 // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
 let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext",
     MaskedPolicy = NonePolicy, ManualCodegen = [{
-      ID = Intrinsic::experimental_vector_insert;
+      ID = Intrinsic::vector_insert;
       IntrinsicTypes = {ResultType, Ops[0]->getType()};
       Ops.push_back(llvm::UndefValue::get(ResultType));
       std::swap(Ops[0], Ops[1]);
@@ -2233,7 +2233,7 @@
 let Name = "vget_v", MaskedPolicy = NonePolicy,
     ManualCodegen = [{
       {
-        ID = Intrinsic::experimental_vector_extract;
+        ID = Intrinsic::vector_extract;
         auto *VecTy = cast<ScalableVectorType>(ResultType);
         auto *OpVecTy = cast<ScalableVectorType>(Ops[0]->getType());
         // Mask to only valid indices.
@@ -2256,7 +2256,7 @@
 let Name = "vset_v", Log2LMUL = [0, 1, 2], MaskedPolicy = NonePolicy,
     ManualCodegen = [{
       {
-        ID = Intrinsic::experimental_vector_insert;
+        ID = Intrinsic::vector_insert;
         IntrinsicTypes = {ResultType, Ops[2]->getType()};
         auto *ResVecTy = cast<ScalableVectorType>(ResultType);
         auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1285,8 +1285,8 @@
   }
 
   // If coercing a fixed vector to a scalable vector for ABI compatibility, and
-  // the types match, use the llvm.experimental.vector.insert intrinsic to
-  // perform the conversion.
+  // the types match, use the llvm.vector.insert intrinsic to perform the
+  // conversion.
   if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
     if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
       // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
@@ -2930,8 +2930,7 @@
       // VLST arguments are coerced to VLATs at the function boundary for
       // ABI consistency. If this is a VLST that was coerced to
       // a VLAT at the function boundary and the types match up, use
-      // llvm.experimental.vector.extract to convert back to the original
-      // VLST.
+      // llvm.vector.extract to convert back to the original VLST.
       if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
         llvm::Value *Coerced = Fn->getArg(FirstIRArg);
         if (auto *VecTyFrom =
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -2084,8 +2084,8 @@
   }
 
   // If Src is a fixed vector and Dst is a scalable vector, and both have the
-  // same element type, use the llvm.experimental.vector.insert intrinsic to
-  // perform the bitcast.
+  // same element type, use the llvm.vector.insert intrinsic to perform the
+  // bitcast.
   if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
     if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
       // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
@@ -2112,8 +2112,8 @@
   }
 
   // If Src is a scalable vector and Dst is a fixed vector, and both have the
-  // same element type, use the llvm.experimental.vector.extract intrinsic to
-  // perform the bitcast.
+ // same element type, use the llvm.vector.extract intrinsic to perform the + // bitcast. if (const auto *ScalableSrc = dyn_cast(SrcTy)) { if (const auto *FixedDst = dyn_cast(DstTy)) { // If we are casting a scalable 16 x i1 predicate vector to a fixed i8 diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, 
size_t index) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) { @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) { @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) { @@ -475,7 +475,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) { @@ -484,7 +484,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) { @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) { @@ -502,7 +502,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) { @@ -511,7 +511,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) { @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 0) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { @@ -205,7 +205,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 
0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( undef, 
[[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { @@ -475,7 +475,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { @@ -484,7 +484,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) { @@ -502,7 +502,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) { @@ -511,7 +511,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) { @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) { @@ -547,7 +547,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) { @@ -556,7 +556,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) { @@ -565,7 +565,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) { @@ -574,7 +574,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) { @@ -583,7 +583,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) { @@ -592,7 +592,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) { @@ -601,7 +601,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) { @@ -610,7 +610,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) { @@ -619,7 +619,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) { @@ -628,7 +628,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) { @@ -637,7 +637,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) { @@ -646,7 +646,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) { @@ -655,7 +655,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) { @@ -664,7 +664,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) { @@ -673,7 +673,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) { @@ -682,7 +682,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) { @@ -691,7 +691,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) { @@ -700,7 +700,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) { @@ -709,7 +709,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) { @@ -718,7 +718,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) { @@ -727,7 +727,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) { @@ -736,7 +736,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) { @@ -745,7 +745,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) { @@ -754,7 +754,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) { @@ -763,7 +763,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) { @@ -772,7 +772,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) { @@ -781,7 +781,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) { @@ -790,7 +790,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) { @@ -799,7 +799,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) { @@ -808,7 +808,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) { @@ -817,7 +817,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) { @@ -826,7 +826,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) { @@ -835,7 +835,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 
0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) { @@ -844,7 +844,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) { @@ -853,7 +853,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) { @@ -862,7 +862,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) { @@ -871,7 +871,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) { @@ -880,7 +880,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) { @@ -889,7 +889,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) { @@ -898,7 +898,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) { @@ -907,7 +907,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) { @@ -916,7 +916,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) { @@ -925,7 +925,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) { @@ -934,7 +934,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) { @@ -943,7 +943,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) { @@ -952,7 +952,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) { @@ -961,7 +961,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) { @@ -970,7 +970,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) { @@ -979,7 +979,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) { @@ -988,7 +988,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) { @@ -997,7 +997,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) { @@ -1006,7 +1006,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) { @@ -1015,7 +1015,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) { @@ -1024,7 +1024,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) { @@ -1033,7 +1033,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) { @@ -1042,7 +1042,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) { @@ -1051,7 +1051,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) { @@ -1060,7 +1060,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) { @@ -1069,7 
+1069,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) { @@ -1078,7 +1078,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) { @@ -1087,7 +1087,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { @@ -1096,7 +1096,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { @@ -1105,7 +1105,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { @@ -1114,7 +1114,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { @@ -1123,7 +1123,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { @@ -1132,7 +1132,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { @@ -1141,7 +1141,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( 
[[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { @@ -1150,7 +1150,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { @@ -1159,7 +1159,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { @@ -1168,7 +1168,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { @@ -1177,7 +1177,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { @@ -1186,7 +1186,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { @@ -1195,7 +1195,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { @@ -1204,7 +1204,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { @@ -1213,7 +1213,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { @@ -1222,7 +1222,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], 
i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { @@ -1231,7 +1231,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { @@ -1240,7 +1240,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { @@ -1249,7 +1249,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { @@ -1258,7 +1258,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { @@ -1267,7 +1267,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { @@ -1276,7 +1276,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { @@ -1285,7 +1285,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { @@ -1294,7 +1294,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { @@ -1303,7 +1303,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { @@ -1312,7 +1312,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { @@ -1321,7 +1321,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { @@ -1330,7 +1330,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { @@ -1339,7 +1339,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { @@ -1348,7 +1348,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { @@ -1357,7 +1357,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { @@ -1366,7 +1366,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { @@ -1375,7 +1375,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { @@ -1384,7 +1384,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { @@ -1393,7 +1393,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { @@ -1402,7 +1402,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { @@ -1411,7 +1411,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { @@ -1420,7 +1420,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { @@ -1429,7 +1429,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { @@ -1438,7 +1438,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { @@ -1447,7 +1447,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { @@ -1456,7 +1456,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { @@ -1465,7 +1465,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { @@ -1474,7 +1474,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { @@ -1483,7 +1483,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { @@ -1492,7 +1492,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { @@ -1501,7 +1501,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { @@ -1510,7 +1510,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { @@ -1519,7 +1519,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { @@ -1528,7 +1528,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) 
{ @@ -1537,7 +1537,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { @@ -1546,7 +1546,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { @@ -1555,7 +1555,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { @@ -1564,7 +1564,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { @@ -1573,7 +1573,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { @@ -1582,7 +1582,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { @@ -1591,7 +1591,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { @@ -1600,7 +1600,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { @@ -1609,7 +1609,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( 
[[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { @@ -1618,7 +1618,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { @@ -1627,7 +1627,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { @@ -1636,7 +1636,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { @@ -1645,7 +1645,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { @@ -1654,7 +1654,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { @@ -1663,7 +1663,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { @@ -1672,7 +1672,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { @@ -1681,7 +1681,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { @@ -1690,7 +1690,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { @@ -1699,7 +1699,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { @@ -1708,7 +1708,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { @@ -1717,7 +1717,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { @@ -1726,7 +1726,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { @@ -1735,7 +1735,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { @@ -1744,7 +1744,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { @@ -1753,7 +1753,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { @@ -1762,7 +1762,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { @@ -1771,7 
+1771,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { @@ -1780,7 +1780,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { @@ -1789,7 +1789,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { @@ -1798,7 +1798,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { @@ -1807,7 +1807,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { @@ -1816,7 +1816,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { @@ -1825,7 +1825,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { @@ -1834,7 +1834,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { @@ -1843,7 +1843,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { @@ -1852,7 +1852,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { @@ -1861,7 +1861,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { @@ -1870,7 +1870,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { @@ -1879,7 +1879,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { @@ -1888,7 +1888,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { @@ -1897,7 +1897,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { @@ -1906,7 +1906,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { @@ -1915,7 +1915,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { @@ -1924,7 +1924,7 @@ // CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { @@ -1933,7 +1933,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { @@ -1942,7 +1942,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { @@ -1951,7 +1951,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { @@ -1960,7 +1960,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { @@ -1969,7 +1969,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { @@ -1978,7 +1978,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { @@ -1987,7 +1987,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { @@ -1996,7 +1996,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { @@ -2005,7 +2005,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { @@ -2014,7 +2014,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { @@ -2023,7 +2023,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { @@ -2032,7 +2032,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { @@ -2041,7 +2041,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { @@ -2050,7 +2050,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { @@ -2059,7 +2059,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { @@ -2068,7 +2068,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { @@ -2077,7 +2077,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { @@ -2086,7 +2086,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { @@ -2095,7 +2095,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { @@ -2104,7 +2104,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { @@ -2113,7 +2113,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { @@ -2122,7 +2122,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { @@ -2131,7 +2131,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { @@ -2140,7 +2140,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { @@ -2149,7 +2149,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 
0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { @@ -2158,7 +2158,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c @@ -7,7 +7,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) { @@ -16,7 +16,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) { @@ -25,7 +25,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) { @@ -34,7 +34,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) { @@ -43,7 +43,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) { @@ -52,7 +52,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) { @@ -61,7 +61,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) { @@ -70,7 +70,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) { @@ -79,7 +79,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) { @@ -88,7 +88,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) { @@ -97,7 +97,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) { @@ -106,7 +106,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) { @@ -115,7 +115,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) { @@ -124,7 +124,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) { @@ -133,7 +133,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) { @@ -142,7 +142,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) { @@ -151,7 +151,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) { @@ -160,7 +160,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) { @@ -169,7 +169,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) { @@ -178,7 +178,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) { @@ -187,7 +187,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) { @@ -196,7 +196,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) { @@ -205,7 +205,7 @@ // 
CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) { @@ -214,7 +214,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) { @@ -223,7 +223,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) { @@ -232,7 +232,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) { @@ -241,7 +241,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) { @@ -250,7 +250,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) { @@ -259,7 +259,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) { @@ -268,7 +268,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) { @@ -277,7 +277,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) { @@ -286,7 +286,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) { @@ -295,7 +295,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) { @@ -304,7 +304,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) { @@ -313,7 +313,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) { @@ -322,7 +322,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) { @@ -331,7 +331,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) { @@ -340,7 +340,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.insert.nxv8f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) { @@ -349,7 +349,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) { @@ -358,7 +358,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) { @@ -367,7 +367,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) { @@ -376,7 +376,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv8f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) { @@ -385,7 +385,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) { @@ -394,7 +394,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) { @@ -403,7 +403,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) { @@ -412,7 +412,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) { @@ -421,7 +421,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) { @@ -430,7 +430,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) { @@ -439,7 +439,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) { @@ -448,7 +448,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) { @@ -457,7 +457,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) { @@ -466,7 +466,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) { @@ -475,7 +475,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) { @@ -484,7 +484,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) { @@ -493,7 +493,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) { @@ -502,7 +502,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) { @@ -511,7 +511,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) { @@ -520,7 +520,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) { @@ -529,7 +529,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) { @@ -538,7 +538,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv4f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vget-vset-ice.cpp b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget-vset-ice.cpp --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vget-vset-ice.cpp +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget-vset-ice.cpp @@ -12,7 +12,7 @@ // CHECK-RV64-LABEL: @_Z21test_vget_v_i8m2_i8m1u14__rvv_int8m2_t // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) { @@ -21,7 +21,7 @@ // CHECK-RV64-LABEL: @_Z21test_vset_v_i8m1_i8m2u14__rvv_int8m2_tu14__rvv_int8m1_t // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vget.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 4) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 
12) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 8) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 48) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 48) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src) { @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 24) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src) { @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src) { @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src) { @@ -332,7 +332,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src) { @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src) { @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src) { @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src) { @@ -494,7 +494,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src) { @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src) { @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src) { @@ -521,7 +521,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( 
[[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv8f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m2_f16m1 (vfloat16m2_t src) { @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv16f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m4_f16m1 (vfloat16m4_t src) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv32f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vget_v_f16m8_f16m1 (vfloat16m8_t src) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m4_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv16f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vget_v_f16m4_f16m2 (vfloat16m4_t src) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv32f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vget_v_f16m8_f16m2 (vfloat16m8_t src) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vget_v_f16m8_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16f16.nxv32f16( [[SRC:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vget_v_f16m8_f16m4 (vfloat16m8_t src) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv1i8( undef, 
[[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { @@ 
-188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, 
[[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { @@ -332,7 +332,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { @@ -350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { @@ -422,7 +422,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { @@ -494,7 +494,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) { @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) { @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) { @@ -521,7 +521,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) { @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) { @@ -566,7 +566,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, 
[[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) { @@ -602,7 +602,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) { @@ -611,7 +611,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) { @@ -620,7 +620,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) { @@ -629,7 +629,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) { @@ -638,7 +638,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) { @@ -647,7 +647,7 @@ // CHECK-RV64-LABEL: 
@test_vlmul_ext_v_u8m2_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) { @@ -656,7 +656,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) { @@ -665,7 +665,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) { @@ -674,7 +674,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) { @@ -683,7 +683,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) { @@ -692,7 +692,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) { @@ -701,7 +701,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) { @@ -710,7 +710,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) { @@ -719,7 +719,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) { @@ -728,7 +728,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) { @@ -737,7 +737,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) { @@ -746,7 +746,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) { @@ -755,7 +755,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) { @@ -764,7 +764,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) { @@ -773,7 +773,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) { @@ -782,7 +782,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) { @@ -791,7 +791,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) { @@ 
-800,7 +800,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) { @@ -809,7 +809,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) { @@ -818,7 +818,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) { @@ -827,7 +827,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) { @@ -836,7 +836,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) { @@ -845,7 +845,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) { @@ -854,7 +854,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) { @@ -863,7 +863,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) { @@ -872,7 +872,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, 
[[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) { @@ -881,7 +881,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) { @@ -890,7 +890,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) { @@ -899,7 +899,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) { @@ -908,7 +908,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) { @@ -917,7 +917,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) { @@ -926,7 +926,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) { @@ -935,7 +935,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) { @@ -944,7 +944,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t 
test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) { @@ -953,7 +953,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) { @@ -962,7 +962,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) { @@ -971,7 +971,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) { @@ -980,7 +980,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) { @@ -989,7 +989,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) { @@ -998,7 +998,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) { @@ -1007,7 +1007,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) { @@ -1016,7 +1016,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) { @@ -1025,7 +1025,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) { @@ -1034,7 +1034,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) { @@ -1043,7 +1043,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) { @@ -1052,7 +1052,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) { @@ -1061,7 +1061,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) { @@ -1070,7 +1070,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) { @@ -1079,7 +1079,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) { @@ -1088,7 +1088,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { @@ -1097,7 +1097,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { @@ -1106,7 +1106,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { @@ -1115,7 +1115,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { @@ -1124,7 +1124,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { @@ -1133,7 +1133,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { @@ -1142,7 +1142,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { @@ -1151,7 +1151,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { @@ -1160,7 +1160,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { @@ -1169,7 +1169,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { @@ -1178,7 +1178,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { @@ -1187,7 +1187,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { @@ -1196,7 +1196,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { @@ -1205,7 +1205,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { @@ -1214,7 +1214,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { @@ -1223,7 +1223,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { @@ -1232,7 +1232,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { @@ -1241,7 +1241,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { @@ -1250,7 +1250,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { @@ -1259,7 +1259,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { @@ -1268,7 +1268,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { @@ -1277,7 +1277,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { @@ -1286,7 +1286,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { @@ -1295,7 +1295,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { @@ -1304,7 +1304,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { @@ -1313,7 +1313,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { @@ -1322,7 +1322,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { @@ -1331,7 +1331,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { @@ -1340,7 +1340,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { @@ -1349,7 +1349,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { @@ -1358,7 +1358,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { @@ -1367,7 +1367,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { @@ -1376,7 +1376,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { @@ -1385,7 +1385,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { @@ -1394,7 +1394,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { @@ -1403,7 +1403,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { @@ -1412,7 +1412,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( 
[[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { @@ -1421,7 +1421,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { @@ -1430,7 +1430,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { @@ -1439,7 +1439,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { @@ -1448,7 +1448,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { @@ -1457,7 +1457,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { @@ -1466,7 +1466,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { @@ -1475,7 +1475,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { @@ -1484,7 +1484,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { @@ -1493,7 +1493,7 @@ // CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_i32m8_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { @@ -1502,7 +1502,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { @@ -1511,7 +1511,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { @@ -1520,7 +1520,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { @@ -1529,7 +1529,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { @@ -1538,7 +1538,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { @@ -1547,7 +1547,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { @@ -1556,7 +1556,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { @@ -1565,7 +1565,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { @@ -1574,7 +1574,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { @@ -1583,7 +1583,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { @@ -1592,7 +1592,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { @@ -1601,7 +1601,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { @@ -1610,7 +1610,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { @@ -1619,7 +1619,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { @@ -1628,7 +1628,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { @@ -1637,7 +1637,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { @@ -1646,7 +1646,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { @@ -1655,7 +1655,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { @@ -1664,7 +1664,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { @@ -1673,7 +1673,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { @@ -1682,7 +1682,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { @@ -1691,7 +1691,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { @@ -1700,7 +1700,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { @@ -1709,7 +1709,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { @@ -1718,7 +1718,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { @@ -1727,7 +1727,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { @@ -1736,7 +1736,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { @@ -1745,7 +1745,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { @@ -1754,7 +1754,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { @@ -1763,7 +1763,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { @@ -1772,7 +1772,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { @@ -1781,7 +1781,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { @@ -1790,7 +1790,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { @@ -1799,7 +1799,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { @@ -1808,7 +1808,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { @@ -1817,7 +1817,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { @@ -1826,7 +1826,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { @@ -1835,7 +1835,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { @@ -1844,7 +1844,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { @@ -1853,7 +1853,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { @@ -1862,7 +1862,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { @@ -1871,7 +1871,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { @@ -1880,7 +1880,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { @@ -1889,7 +1889,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { @@ -1898,7 +1898,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { @@ -1907,7 +1907,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { @@ -1916,7 +1916,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { @@ -1925,7 +1925,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { @@ -1934,7 +1934,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { @@ -1943,7 +1943,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { @@ -1952,7 +1952,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { @@ -1961,7 +1961,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { @@ -1970,7 +1970,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { @@ -1979,7 +1979,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { @@ -1988,7 +1988,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { @@ -1997,7 +1997,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { @@ -2006,7 +2006,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { @@ -2015,7 +2015,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { @@ -2024,7 +2024,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { @@ -2033,7 +2033,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( 
[[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { @@ -2042,7 +2042,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { @@ -2051,7 +2051,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { @@ -2060,7 +2060,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { @@ -2069,7 +2069,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { @@ -2078,7 +2078,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { @@ -2087,7 +2087,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { @@ -2096,7 +2096,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { @@ -2105,7 +2105,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { @@ 
-2114,7 +2114,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { @@ -2123,7 +2123,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { @@ -2132,7 +2132,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { @@ -2141,7 +2141,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { @@ -2150,7 +2150,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { @@ -2159,7 +2159,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { @@ -2168,7 +2168,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2 (vfloat16mf4_t op1) { @@ -2177,7 +2177,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1 (vfloat16mf4_t op1) { @@ -2186,7 +2186,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv1f16( undef, [[OP1:%.*]], i64 
0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2 (vfloat16mf4_t op1) { @@ -2195,7 +2195,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4 (vfloat16mf4_t op1) { @@ -2204,7 +2204,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv1f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8 (vfloat16mf4_t op1) { @@ -2213,7 +2213,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f16.nxv2f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1 (vfloat16mf2_t op1) { @@ -2222,7 +2222,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.nxv2f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2 (vfloat16mf2_t op1) { @@ -2231,7 +2231,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv2f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4 (vfloat16mf2_t op1) { @@ -2240,7 +2240,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv2f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8 (vfloat16mf2_t op1) { @@ -2249,7 +2249,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.nxv4f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2 (vfloat16m1_t op1) { @@ -2258,7 +2258,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv4f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4 (vfloat16m1_t op1) { @@ -2267,7 +2267,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv4f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8 (vfloat16m1_t op1) { @@ -2276,7 +2276,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv8f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv8f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4 (vfloat16m2_t op1) { @@ -2285,7 +2285,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv8f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv8f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8 (vfloat16m2_t op1) { @@ -2294,7 +2294,7 @@ // CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m4_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv16f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv16f16( undef, [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8 (vfloat16m4_t op1) { @@ -2303,7 +2303,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4 (vfloat16mf2_t op1) { @@ -2312,7 +2312,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv4f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4 (vfloat16m1_t op1) { @@ -2321,7 +2321,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv4f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2 (vfloat16m1_t op1) { @@ -2330,7 +2330,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv8f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4 (vfloat16m2_t op1) { @@ -2339,7 +2339,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv8f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2 (vfloat16m2_t op1) { @@ -2348,7 +2348,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv8f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1 (vfloat16m2_t op1) { @@ -2357,7 +2357,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv16f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4 (vfloat16m4_t op1) { @@ -2366,7 +2366,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2 (vfloat16m4_t op1) { @@ -2375,7 +2375,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1 (vfloat16m4_t op1) { @@ -2384,7 +2384,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2 (vfloat16m4_t op1) { @@ -2393,7 +2393,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4 (vfloat16m8_t op1) { @@ -2402,7 +2402,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv32f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2 (vfloat16m8_t op1) { @@ -2411,7 +2411,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1 (vfloat16m8_t op1) { @@ -2420,7 +2420,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2 (vfloat16m8_t op1) { @@ -2429,7 +2429,7 @@ // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4 (vfloat16m8_t op1) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vset.c @@ -8,7 +8,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, vint8m1_t val) { @@ -17,7 +17,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, vint8m1_t val) { @@ -26,7 +26,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, vint8m2_t val) { @@ -35,7 +35,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 56) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 56) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, vint8m1_t val) { @@ -44,7 +44,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, vint8m2_t val) { @@ -53,7 +53,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, vint8m4_t val) { @@ -62,7 +62,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, vint16m1_t val) { @@ -71,7 +71,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, vint16m1_t val) { @@ -80,7 +80,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, vint16m2_t val) { @@ -89,7 +89,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 28) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 28) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, vint16m1_t val) { @@ -98,7 +98,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, vint16m2_t val) { @@ -107,7 +107,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, vint16m4_t val) { @@ -116,7 +116,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, vint32m1_t val) { @@ -125,7 +125,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], 
[[VAL:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, vint32m1_t val) { @@ -134,7 +134,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, vint32m2_t val) { @@ -143,7 +143,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 14) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 14) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, vint32m1_t val) { @@ -152,7 +152,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, vint32m2_t val) { @@ -161,7 +161,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, vint32m4_t val) { @@ -170,7 +170,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, vint64m1_t val) { @@ -179,7 +179,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, vint64m1_t val) { @@ -188,7 +188,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, vint64m2_t val) { @@ -197,7 +197,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 7) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 7) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, vint64m1_t val) { @@ -206,7 +206,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, vint64m2_t val) { @@ -215,7 +215,7 @@ // CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, vint64m4_t val) { @@ -224,7 +224,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, vuint8m1_t val) { @@ -233,7 +233,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 24) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 24) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, vuint8m1_t val) { @@ -242,7 +242,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, vuint8m2_t val) { @@ -251,7 +251,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 56) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 56) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, vuint8m1_t val) { @@ -260,7 +260,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, vuint8m2_t val) { @@ -269,7 +269,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 32) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, vuint8m4_t 
val) { @@ -278,7 +278,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, vuint16m1_t val) { @@ -287,7 +287,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 12) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 12) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, vuint16m1_t val) { @@ -296,7 +296,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, vuint16m2_t val) { @@ -305,7 +305,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 28) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 28) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, vuint16m1_t val) { @@ -314,7 +314,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, vuint16m2_t val) { @@ -323,7 +323,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 16) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, vuint16m4_t val) { @@ -332,7 +332,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, vuint32m1_t val) { @@ -341,7 +341,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, vuint32m1_t val) { @@ 
-350,7 +350,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, vuint32m2_t val) { @@ -359,7 +359,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 14) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 14) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, vuint32m1_t val) { @@ -368,7 +368,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, vuint32m2_t val) { @@ -377,7 +377,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, vuint32m4_t val) { @@ -386,7 +386,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, vuint64m1_t val) { @@ -395,7 +395,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, vuint64m1_t val) { @@ -404,7 +404,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, vuint64m2_t val) { @@ -413,7 +413,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 7) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 7) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, vuint64m1_t val) { @@ -422,7 +422,7 @@ // 
CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, vuint64m2_t val) { @@ -431,7 +431,7 @@ // CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, vuint64m4_t val) { @@ -440,7 +440,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, vfloat32m1_t val) { @@ -449,7 +449,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 6) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 6) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, vfloat32m1_t val) { @@ -458,7 +458,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, vfloat32m2_t val) { @@ -467,7 +467,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 14) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 14) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, vfloat32m1_t val) { @@ -476,7 +476,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, vfloat32m2_t val) { @@ -485,7 +485,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( [[DEST:%.*]], [[VAL:%.*]], i64 8) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f32.nxv8f32( [[DEST:%.*]], [[VAL:%.*]], i64 8) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, vfloat32m4_t val) { @@ -494,7 +494,7 @@ // 
CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, vfloat64m1_t val) { @@ -503,7 +503,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 3) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 3) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, vfloat64m1_t val) { @@ -512,7 +512,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 2) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 2) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, vfloat64m2_t val) { @@ -521,7 +521,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 7) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 7) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, vfloat64m1_t val) { @@ -530,7 +530,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, vfloat64m2_t val) { @@ -539,7 +539,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( [[DEST:%.*]], [[VAL:%.*]], i64 4) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f64.nxv4f64( [[DEST:%.*]], [[VAL:%.*]], i64 4) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, vfloat64m4_t val) { @@ -548,7 +548,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vset_v_f16m1_f16m2 (vfloat16m2_t dest, vfloat16m1_t val) { @@ -557,7 +557,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vset_v_f16m1_f16m4 (vfloat16m4_t dest, vfloat16m1_t val) { @@ -566,7 +566,7 @@ // 
CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vset_v_f16m2_f16m4 (vfloat16m4_t dest, vfloat16m2_t val) { @@ -575,7 +575,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f16m1_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv4f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vset_v_f16m1_f16m8 (vfloat16m8_t dest, vfloat16m1_t val) { @@ -584,7 +584,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f16m2_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv8f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vset_v_f16m2_f16m8 (vfloat16m8_t dest, vfloat16m2_t val) { @@ -593,7 +593,7 @@ // CHECK-RV64-LABEL: @test_vset_v_f16m4_f16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv16f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv32f16.nxv16f16( [[DEST:%.*]], [[VAL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vset_v_f16m4_f16m8 (vfloat16m8_t dest, vfloat16m4_t val) { diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c --- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c +++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c @@ -53,9 +53,9 @@ // CHECK128-LABEL: define{{.*}} <16 x i8> @f2(<16 x i8> noundef %x) // CHECK128-NEXT: entry: // CHECK128-NEXT: [[TMP0:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) -// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[X:%.*]], i64 0) +// CHECK128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[X:%.*]], i64 0) // CHECK128-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.asrd.nxv16i8( [[TMP0]], [[CASTSCALABLESVE]], i32 1) -// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[TMP1]], i64 0) +// CHECK128-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[TMP1]], i64 0) // CHECK128-NEXT: ret <16 x i8> [[CASTFIXEDSVE]] // CHECK-LABEL: define{{.*}} void @f2( @@ -63,9 +63,9 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[X:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[TMP0:%.*]], align 16, [[TBAA6:!tbaa !.*]] // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8( undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8( undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = call 
@llvm.aarch64.sve.asrd.nxv16i8( [[TMP1]], [[CASTSCALABLESVE]], i32 1) -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[TMP2]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[TMP2]], i64 0) // CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[CASTFIXEDSVE]], <[[#div(VBITS,8)]] x i8>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]] // CHECK-NEXT: ret void vec_int8 f2(vec_int8 x) { return svasrd_x(svptrue_b8(), x, 1); } @@ -80,14 +80,14 @@ // CHECK128-LABEL: define{{.*}} void @g( noundef %x.coerce) // CHECK128-NEXT: entry: -// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[X_COERCE:%.*]], i64 0) +// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[X_COERCE:%.*]], i64 0) // CHECK128-NEXT: call void @f3(<16 x i8> noundef [[X]]) [[ATTR5:#.*]] // CHECK128-NEXT: ret void // CHECK-LABEL: define{{.*}} void @g( noundef %x.coerce) // CHECK-NEXT: entry: // CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16 -// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[X_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8( [[X_COERCE:%.*]], i64 0) // CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]] // CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]] // CHECK-NEXT: ret void diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp --- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp +++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp @@ -49,10 +49,10 @@ // CHECK-SAME: [[#VBITS]] // CHECK-SAME: EES_( noundef %x.coerce, noundef %y.coerce) // CHECK-NEXT: entry: -// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE1:%.*]], i64 0) +// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32( [[X_COERCE1:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <[[#div(VBITS, 32)]] x i32> [[Y]], [[X]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32( undef, <[[#div(VBITS, 32)]] x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v[[#div(VBITS, 32)]]i32( undef, <[[#div(VBITS, 32)]] x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] typedef svint32_t vec __attribute__((arm_sve_vector_bits(N))); auto f(vec x, vec y) { return x + y; } // Returns a vec. 
@@ -68,11 +68,11 @@ // CHECK-SAME: [[#VBITS]] // CHECK-SAME: EE( noundef %x.coerce) // CHECK-NEXT: entry: -// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[X_COERCE:%.*]], i64 0) +// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[X_COERCE:%.*]], i64 0) // CHECK128-NEXT: call void @_Z1fDv8_s(<8 x i16> noundef [[X]]) [[ATTR5:#.*]] // CHECK128-NEXT: ret void // CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16 -// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS, 16)]] x i16> @llvm.experimental.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16( [[X_COERCE:%.*]], i64 0) +// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS, 16)]] x i16> @llvm.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16( [[X_COERCE:%.*]], i64 0) // CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], <[[#div(VBITS, 16)]] x i16>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]] // CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(<[[#div(VBITS, 16)]] x i16>* noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]] // CHECKWIDE-NEXT: ret void diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq-bfloat.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq-bfloat.c --- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq-bfloat.c +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq-bfloat.c @@ -39,7 +39,7 @@ // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x bfloat> [[TMP4]], bfloat [[X5:%.*]], i64 5 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x bfloat> [[TMP5]], bfloat [[X6:%.*]], i64 6 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x bfloat> [[TMP6]], bfloat [[X7:%.*]], i64 7 -// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8bf16( [[TMP8]], i64 0) // CHECK-NEXT: ret [[TMP9]] // @@ -53,7 +53,7 @@ // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x bfloat> [[TMP4]], bfloat [[X5:%.*]], i64 5 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x bfloat> [[TMP5]], bfloat [[X6:%.*]], i64 6 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x bfloat> [[TMP6]], bfloat [[X7:%.*]], i64 7 -// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8bf16( [[TMP8]], i64 0) // CPP-CHECK-NEXT: ret [[TMP9]] // diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c --- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c @@ -197,7 +197,7 @@ // CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13 // CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15 -// CHECK-NEXT: [[TMP16:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CHECK-NEXT: [[TMP16:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CHECK-NEXT: [[TMP17:%.*]] 
= call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP16]], i64 0) // CHECK-NEXT: ret [[TMP17]] // @@ -219,7 +219,7 @@ // CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13 // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15 -// CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP16]], i64 0) // CPP-CHECK-NEXT: ret [[TMP17]] // @@ -242,7 +242,7 @@ // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7 -// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP8]], i64 0) // CHECK-NEXT: ret [[TMP9]] // @@ -256,7 +256,7 @@ // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7 -// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP8]], i64 0) // CPP-CHECK-NEXT: ret [[TMP9]] // @@ -273,7 +273,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3 -// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) +// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP4]], i64 0) // CHECK-NEXT: ret [[TMP5]] // @@ -283,7 +283,7 @@ // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3 -// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) +// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) // CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP4]], i64 0) // CPP-CHECK-NEXT: ret [[TMP5]] // @@ -297,7 +297,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1 -// CHECK-NEXT: [[TMP2:%.*]] = call 
@llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) +// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) // CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP2]], i64 0) // CHECK-NEXT: ret [[TMP3]] // @@ -305,7 +305,7 @@ // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1 -// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP2]], i64 0) // CPP-CHECK-NEXT: ret [[TMP3]] // @@ -332,7 +332,7 @@ // CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13 // CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15 -// CHECK-NEXT: [[TMP16:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CHECK-NEXT: [[TMP16:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CHECK-NEXT: [[TMP17:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP16]], i64 0) // CHECK-NEXT: ret [[TMP17]] // @@ -354,7 +354,7 @@ // CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13 // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15 -// CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP16]], i64 0) // CPP-CHECK-NEXT: ret [[TMP17]] // @@ -377,7 +377,7 @@ // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7 -// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP8]], i64 0) // CHECK-NEXT: ret [[TMP9]] // @@ -391,7 +391,7 @@ // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7 -// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP8]], i64 0) // CPP-CHECK-NEXT: ret [[TMP9]] // @@ -408,7 +408,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 
[[X1:%.*]], i64 1 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3 -// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) +// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP4]], i64 0) // CHECK-NEXT: ret [[TMP5]] // @@ -418,7 +418,7 @@ // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3 -// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) +// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP3]], i64 0) // CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP4]], i64 0) // CPP-CHECK-NEXT: ret [[TMP5]] // @@ -432,7 +432,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1 -// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) +// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) // CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP2]], i64 0) // CHECK-NEXT: ret [[TMP3]] // @@ -440,7 +440,7 @@ // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> undef, i64 [[X0:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1 -// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP1]], i64 0) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP2]], i64 0) // CPP-CHECK-NEXT: ret [[TMP3]] // @@ -459,7 +459,7 @@ // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7 -// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8f16( [[TMP8]], i64 0) // CHECK-NEXT: ret [[TMP9]] // @@ -473,7 +473,7 @@ // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7 -// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8f16( 
[[TMP8]], i64 0) // CPP-CHECK-NEXT: ret [[TMP9]] // @@ -490,7 +490,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3 -// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[TMP3]], i64 0) +// CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[TMP3]], i64 0) // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4f32( [[TMP4]], i64 0) // CHECK-NEXT: ret [[TMP5]] // @@ -500,7 +500,7 @@ // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3 -// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[TMP3]], i64 0) +// CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[TMP3]], i64 0) // CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4f32( [[TMP4]], i64 0) // CPP-CHECK-NEXT: ret [[TMP5]] // @@ -514,7 +514,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[X0:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1 -// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP1]], i64 0) +// CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP1]], i64 0) // CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2f64( [[TMP2]], i64 0) // CHECK-NEXT: ret [[TMP3]] // @@ -522,7 +522,7 @@ // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[X0:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1 -// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP1]], i64 0) +// CPP-CHECK-NEXT: [[TMP2:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP1]], i64 0) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2f64( [[TMP2]], i64 0) // CPP-CHECK-NEXT: ret [[TMP3]] // @@ -566,7 +566,7 @@ // CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[FROMBOOL14]], i64 14 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[FROMBOOL15]], i64 15 // CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) -// CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CHECK-NEXT: [[TMP17:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP17]], i64 0) // CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv16i8( [[TMP16]], [[TMP18]], zeroinitializer) // CHECK-NEXT: ret [[TMP19]] @@ -606,7 +606,7 @@ // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[FROMBOOL14]], i64 14 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[FROMBOOL15]], i64 15 // CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 
31) -// CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) +// CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0) // CPP-CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP17]], i64 0) // CPP-CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv16i8( [[TMP16]], [[TMP18]], zeroinitializer) // CPP-CHECK-NEXT: ret [[TMP19]] @@ -639,7 +639,7 @@ // CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7 // CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) -// CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0) +// CHECK-NEXT: [[TMP17:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0) // CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP17]], i64 0) // CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv8i16( [[TMP16]], [[TMP18]], zeroinitializer) // CHECK-NEXT: [[TMP20:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( [[TMP19]]) @@ -664,7 +664,7 @@ // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7 // CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) -// CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0) +// CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0) // CPP-CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP17]], i64 0) // CPP-CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv8i16( [[TMP16]], [[TMP18]], zeroinitializer) // CPP-CHECK-NEXT: [[TMP20:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( [[TMP19]]) @@ -688,7 +688,7 @@ // CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3 // CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) -// CHECK-NEXT: [[TMP9:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0) +// CHECK-NEXT: [[TMP9:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP9]], i64 0) // CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv4i32( [[TMP8]], [[TMP10]], zeroinitializer) // CHECK-NEXT: [[TMP12:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( [[TMP11]]) @@ -705,7 +705,7 @@ // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3 // CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) -// CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0) +// CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0) // CPP-CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP9]], i64 0) // CPP-CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv4i32( 
[[TMP8]], [[TMP10]], zeroinitializer) // CPP-CHECK-NEXT: [[TMP12:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( [[TMP11]]) @@ -724,7 +724,7 @@ // CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> undef, i64 [[TMP0]], i64 0 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1 // CHECK-NEXT: [[TMP4:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) -// CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0) +// CHECK-NEXT: [[TMP5:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0) // CHECK-NEXT: [[TMP6:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP5]], i64 0) // CHECK-NEXT: [[TMP7:%.*]] = call @llvm.aarch64.sve.cmpne.nxv2i64( [[TMP4]], [[TMP6]], zeroinitializer) // CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( [[TMP7]]) @@ -737,7 +737,7 @@ // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> undef, i64 [[TMP0]], i64 0 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1 // CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) -// CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0) +// CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0) // CPP-CHECK-NEXT: [[TMP6:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP5]], i64 0) // CPP-CHECK-NEXT: [[TMP7:%.*]] = call @llvm.aarch64.sve.cmpne.nxv2i64( [[TMP4]], [[TMP6]], zeroinitializer) // CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( [[TMP7]]) diff --git a/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c b/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c --- a/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-arith-ops.c @@ -29,10 +29,10 @@ // CHECK-LABEL: @add_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t add_i8(fixed_int8_t a, fixed_int8_t b) { @@ -41,10 +41,10 @@ // CHECK-LABEL: @add_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: 
[[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t add_i16(fixed_int16_t a, fixed_int16_t b) { @@ -53,10 +53,10 @@ // CHECK-LABEL: @add_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t add_i32(fixed_int32_t a, fixed_int32_t b) { @@ -65,10 +65,10 @@ // CHECK-LABEL: @add_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t add_i64(fixed_int64_t a, fixed_int64_t b) { @@ -77,10 +77,10 @@ // CHECK-LABEL: @add_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t add_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -89,10 +89,10 @@ // CHECK-LABEL: @add_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] 
= call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t add_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -101,10 +101,10 @@ // CHECK-LABEL: @add_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t add_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -113,10 +113,10 @@ // CHECK-LABEL: @add_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t add_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -125,13 +125,13 @@ // CHECK-LABEL: @add_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[ADD:%.*]] = fadd <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[ADD]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t add_f16(fixed_float16_t a, fixed_float16_t b) { @@ -140,10 +140,10 @@ // CHECK-LABEL: @add_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> 
@llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = fadd <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t add_f32(fixed_float32_t a, fixed_float32_t b) { @@ -152,10 +152,10 @@ // CHECK-LABEL: @add_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t add_f64(fixed_float64_t a, fixed_float64_t b) { @@ -164,10 +164,10 @@ // CHECK-LABEL: @add_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t add_inplace_i8(fixed_int8_t a, fixed_int8_t b) { @@ -176,10 +176,10 @@ // CHECK-LABEL: @add_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t add_inplace_i16(fixed_int16_t a, fixed_int16_t b) { @@ -188,10 +188,10 @@ // CHECK-LABEL: 
@add_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t add_inplace_i32(fixed_int32_t a, fixed_int32_t b) { @@ -200,10 +200,10 @@ // CHECK-LABEL: @add_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t add_inplace_i64(fixed_int64_t a, fixed_int64_t b) { @@ -212,10 +212,10 @@ // CHECK-LABEL: @add_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t add_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -224,10 +224,10 @@ // CHECK-LABEL: @add_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( 
undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t add_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -236,10 +236,10 @@ // CHECK-LABEL: @add_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t add_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -248,10 +248,10 @@ // CHECK-LABEL: @add_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t add_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -260,13 +260,13 @@ // CHECK-LABEL: @add_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[ADD:%.*]] = fadd <32 x float> [[CONV2]], [[CONV]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[ADD]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t add_inplace_f16(fixed_float16_t a, fixed_float16_t b) { @@ -275,10 +275,10 @@ // CHECK-LABEL: @add_inplace_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call 
<16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = fadd <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t add_inplace_f32(fixed_float32_t a, fixed_float32_t b) { @@ -287,10 +287,10 @@ // CHECK-LABEL: @add_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t add_inplace_f64(fixed_float64_t a, fixed_float64_t b) { @@ -299,11 +299,11 @@ // CHECK-LABEL: @add_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t add_scalar_i8(fixed_int8_t a, int8_t b) { @@ -312,11 +312,11 @@ // CHECK-LABEL: @add_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t add_scalar_i16(fixed_int16_t a, int16_t b) { @@ -325,11 +325,11 @@ // CHECK-LABEL: @add_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x 
i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t add_scalar_i32(fixed_int32_t a, int32_t b) { @@ -338,11 +338,11 @@ // CHECK-LABEL: @add_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t add_scalar_i64(fixed_int64_t a, int64_t b) { @@ -351,11 +351,11 @@ // CHECK-LABEL: @add_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t add_scalar_u8(fixed_uint8_t a, uint8_t b) { @@ -364,11 +364,11 @@ // CHECK-LABEL: @add_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[ADD]], i64 0) // CHECK-NEXT: ret 
[[CASTSCALABLESVE]] // fixed_uint16_t add_scalar_u16(fixed_uint16_t a, uint16_t b) { @@ -377,11 +377,11 @@ // CHECK-LABEL: @add_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t add_scalar_u32(fixed_uint32_t a, uint32_t b) { @@ -390,11 +390,11 @@ // CHECK-LABEL: @add_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = add <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t add_scalar_u64(fixed_uint64_t a, uint64_t b) { @@ -403,11 +403,11 @@ // CHECK-LABEL: @add_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = fadd <32 x half> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t add_scalar_f16(fixed_float16_t a, __fp16 b) { @@ -416,11 +416,11 @@ // CHECK-LABEL: @add_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = fadd <16 x float> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: 
[[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t add_scalar_f32(fixed_float32_t a, float b) { @@ -429,11 +429,11 @@ // CHECK-LABEL: @add_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[ADD:%.*]] = fadd <8 x double> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[ADD]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t add_scalar_f64(fixed_float64_t a, double b) { @@ -444,10 +444,10 @@ // CHECK-LABEL: @sub_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t sub_i8(fixed_int8_t a, fixed_int8_t b) { @@ -456,10 +456,10 @@ // CHECK-LABEL: @sub_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t sub_i16(fixed_int16_t a, fixed_int16_t b) { @@ -468,10 +468,10 @@ // CHECK-LABEL: @sub_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// 
CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t sub_i32(fixed_int32_t a, fixed_int32_t b) { @@ -480,10 +480,10 @@ // CHECK-LABEL: @sub_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t sub_i64(fixed_int64_t a, fixed_int64_t b) { @@ -492,10 +492,10 @@ // CHECK-LABEL: @sub_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t sub_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -504,10 +504,10 @@ // CHECK-LABEL: @sub_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t sub_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -516,10 +516,10 @@ // CHECK-LABEL: @sub_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], 
i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t sub_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -528,10 +528,10 @@ // CHECK-LABEL: @sub_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t sub_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -540,13 +540,13 @@ // CHECK-LABEL: @sub_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[SUB:%.*]] = fsub <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[SUB]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t sub_f16(fixed_float16_t a, fixed_float16_t b) { @@ -555,10 +555,10 @@ // CHECK-LABEL: @sub_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) // 
CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t sub_f32(fixed_float32_t a, fixed_float32_t b) { @@ -567,10 +567,10 @@ // CHECK-LABEL: @sub_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t sub_f64(fixed_float64_t a, fixed_float64_t b) { @@ -579,10 +579,10 @@ // CHECK-LABEL: @sub_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t sub_inplace_i8(fixed_int8_t a, fixed_int8_t b) { @@ -591,10 +591,10 @@ // CHECK-LABEL: @sub_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t sub_inplace_i16(fixed_int16_t a, fixed_int16_t b) { @@ -603,10 +603,10 @@ // CHECK-LABEL: @sub_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t sub_inplace_i32(fixed_int32_t a, fixed_int32_t b) { @@ -615,10 +615,10 @@ // CHECK-LABEL: @sub_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t sub_inplace_i64(fixed_int64_t a, fixed_int64_t b) { @@ -627,10 +627,10 @@ // CHECK-LABEL: @sub_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t sub_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -639,10 +639,10 @@ // CHECK-LABEL: @sub_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t sub_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -651,10 +651,10 @@ // CHECK-LABEL: @sub_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = 
call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t sub_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -663,10 +663,10 @@ // CHECK-LABEL: @sub_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t sub_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -675,13 +675,13 @@ // CHECK-LABEL: @sub_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[SUB:%.*]] = fsub <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[SUB]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t sub_inplace_f16(fixed_float16_t a, fixed_float16_t b) { @@ -690,10 +690,10 @@ // CHECK-LABEL: @sub_inplace_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t sub_inplace_f32(fixed_float32_t a, 
fixed_float32_t b) { @@ -702,10 +702,10 @@ // CHECK-LABEL: @sub_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SUB:%.*]] = fsub <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t sub_inplace_f64(fixed_float64_t a, fixed_float64_t b) { @@ -714,11 +714,11 @@ // CHECK-LABEL: @sub_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t sub_scalar_i8(fixed_int8_t a, int8_t b) { @@ -727,11 +727,11 @@ // CHECK-LABEL: @sub_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t sub_scalar_i16(fixed_int16_t a, int16_t b) { @@ -740,11 +740,11 @@ // CHECK-LABEL: @sub_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x 
i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t sub_scalar_i32(fixed_int32_t a, int32_t b) { @@ -753,11 +753,11 @@ // CHECK-LABEL: @sub_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t sub_scalar_i64(fixed_int64_t a, int64_t b) { @@ -766,11 +766,11 @@ // CHECK-LABEL: @sub_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t sub_scalar_u8(fixed_uint8_t a, uint8_t b) { @@ -779,11 +779,11 @@ // CHECK-LABEL: @sub_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t sub_scalar_u16(fixed_uint16_t a, uint16_t b) { @@ -792,11 +792,11 @@ // CHECK-LABEL: @sub_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, 
<16 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t sub_scalar_u32(fixed_uint32_t a, uint32_t b) { @@ -805,11 +805,11 @@ // CHECK-LABEL: @sub_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = sub <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t sub_scalar_u64(fixed_uint64_t a, uint64_t b) { @@ -818,11 +818,11 @@ // CHECK-LABEL: @sub_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = fsub <32 x half> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t sub_scalar_f16(fixed_float16_t a, __fp16 b) { @@ -831,11 +831,11 @@ // CHECK-LABEL: @sub_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = fsub <16 x float> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t sub_scalar_f32(fixed_float32_t a, float b) { @@ -844,11 +844,11 @@ // CHECK-LABEL: @sub_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> 
@llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SUB:%.*]] = fsub <8 x double> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[SUB]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t sub_scalar_f64(fixed_float64_t a, double b) { @@ -859,10 +859,10 @@ // CHECK-LABEL: @mul_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t mul_i8(fixed_int8_t a, fixed_int8_t b) { @@ -871,10 +871,10 @@ // CHECK-LABEL: @mul_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t mul_i16(fixed_int16_t a, fixed_int16_t b) { @@ -883,10 +883,10 @@ // CHECK-LABEL: @mul_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t mul_i32(fixed_int32_t a, fixed_int32_t b) { @@ -895,10 +895,10 @@ // CHECK-LABEL: @mul_i64( // CHECK-NEXT: entry: -// 
CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t mul_i64(fixed_int64_t a, fixed_int64_t b) { @@ -907,10 +907,10 @@ // CHECK-LABEL: @mul_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t mul_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -919,10 +919,10 @@ // CHECK-LABEL: @mul_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t mul_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -931,10 +931,10 @@ // CHECK-LABEL: @mul_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t 
mul_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -943,10 +943,10 @@ // CHECK-LABEL: @mul_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t mul_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -955,13 +955,13 @@ // CHECK-LABEL: @mul_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[MUL:%.*]] = fmul <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[MUL]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t mul_f16(fixed_float16_t a, fixed_float16_t b) { @@ -970,10 +970,10 @@ // CHECK-LABEL: @mul_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = fmul <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t mul_f32(fixed_float32_t a, fixed_float32_t b) { @@ -982,10 +982,10 @@ // CHECK-LABEL: @mul_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> 
@llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = fmul <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t mul_f64(fixed_float64_t a, fixed_float64_t b) { @@ -994,10 +994,10 @@ // CHECK-LABEL: @mul_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t mul_inplace_i8(fixed_int8_t a, fixed_int8_t b) { @@ -1006,10 +1006,10 @@ // CHECK-LABEL: @mul_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t mul_inplace_i16(fixed_int16_t a, fixed_int16_t b) { @@ -1018,10 +1018,10 @@ // CHECK-LABEL: @mul_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t mul_inplace_i32(fixed_int32_t a, fixed_int32_t b) { @@ -1030,10 +1030,10 @@ // CHECK-LABEL: @mul_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> 
@llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t mul_inplace_i64(fixed_int64_t a, fixed_int64_t b) { @@ -1042,10 +1042,10 @@ // CHECK-LABEL: @mul_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t mul_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -1054,10 +1054,10 @@ // CHECK-LABEL: @mul_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t mul_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -1066,10 +1066,10 @@ // CHECK-LABEL: @mul_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t mul_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -1078,10 +1078,10 @@ // CHECK-LABEL: 
@mul_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t mul_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -1090,13 +1090,13 @@ // CHECK-LABEL: @mul_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[MUL:%.*]] = fmul <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[MUL]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t mul_inplace_f16(fixed_float16_t a, fixed_float16_t b) { @@ -1105,10 +1105,10 @@ // CHECK-LABEL: @mul_inplace_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = fmul <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t mul_inplace_f32(fixed_float32_t a, fixed_float32_t b) { @@ -1117,10 +1117,10 @@ // CHECK-LABEL: @mul_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( 
[[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[MUL:%.*]] = fmul <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t mul_inplace_f64(fixed_float64_t a, fixed_float64_t b) { @@ -1129,11 +1129,11 @@ // CHECK-LABEL: @mul_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t mul_scalar_i8(fixed_int8_t a, int8_t b) { @@ -1142,11 +1142,11 @@ // CHECK-LABEL: @mul_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t mul_scalar_i16(fixed_int16_t a, int16_t b) { @@ -1155,11 +1155,11 @@ // CHECK-LABEL: @mul_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t mul_scalar_i32(fixed_int32_t a, int32_t b) { @@ -1168,11 +1168,11 @@ // CHECK-LABEL: @mul_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( 
[[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t mul_scalar_i64(fixed_int64_t a, int64_t b) { @@ -1181,11 +1181,11 @@ // CHECK-LABEL: @mul_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t mul_scalar_u8(fixed_uint8_t a, uint8_t b) { @@ -1194,11 +1194,11 @@ // CHECK-LABEL: @mul_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t mul_scalar_u16(fixed_uint16_t a, uint16_t b) { @@ -1207,11 +1207,11 @@ // CHECK-LABEL: @mul_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t mul_scalar_u32(fixed_uint32_t a, uint32_t b) { @@ -1220,11 +1220,11 @@ // CHECK-LABEL: @mul_scalar_u64( // 
CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = mul <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t mul_scalar_u64(fixed_uint64_t a, uint64_t b) { @@ -1233,11 +1233,11 @@ // CHECK-LABEL: @mul_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = fmul <32 x half> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t mul_scalar_f16(fixed_float16_t a, __fp16 b) { @@ -1246,11 +1246,11 @@ // CHECK-LABEL: @mul_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = fmul <16 x float> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[MUL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t mul_scalar_f32(fixed_float32_t a, float b) { @@ -1259,11 +1259,11 @@ // CHECK-LABEL: @mul_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[MUL:%.*]] = fmul <8 x double> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[MUL]], i64 0) +// 
CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[MUL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t mul_scalar_f64(fixed_float64_t a, double b) { @@ -1274,10 +1274,10 @@ // CHECK-LABEL: @div_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t div_i8(fixed_int8_t a, fixed_int8_t b) { @@ -1286,10 +1286,10 @@ // CHECK-LABEL: @div_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t div_i16(fixed_int16_t a, fixed_int16_t b) { @@ -1298,10 +1298,10 @@ // CHECK-LABEL: @div_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t div_i32(fixed_int32_t a, fixed_int32_t b) { @@ -1310,10 +1310,10 @@ // CHECK-LABEL: @div_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[B]] 
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t div_i64(fixed_int64_t a, fixed_int64_t b) { @@ -1322,10 +1322,10 @@ // CHECK-LABEL: @div_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t div_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -1334,10 +1334,10 @@ // CHECK-LABEL: @div_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t div_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -1346,10 +1346,10 @@ // CHECK-LABEL: @div_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t div_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -1358,10 +1358,10 @@ // CHECK-LABEL: @div_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = 
call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t div_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -1370,13 +1370,13 @@ // CHECK-LABEL: @div_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[DIV:%.*]] = fdiv <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[DIV]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t div_f16(fixed_float16_t a, fixed_float16_t b) { @@ -1385,10 +1385,10 @@ // CHECK-LABEL: @div_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t div_f32(fixed_float32_t a, fixed_float32_t b) { @@ -1397,10 +1397,10 @@ // CHECK-LABEL: @div_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t div_f64(fixed_float64_t a, fixed_float64_t b) { @@ -1409,10 
+1409,10 @@ // CHECK-LABEL: @div_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t div_inplace_i8(fixed_int8_t a, fixed_int8_t b) { @@ -1421,10 +1421,10 @@ // CHECK-LABEL: @div_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t div_inplace_i16(fixed_int16_t a, fixed_int16_t b) { @@ -1433,10 +1433,10 @@ // CHECK-LABEL: @div_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t div_inplace_i32(fixed_int32_t a, fixed_int32_t b) { @@ -1445,10 +1445,10 @@ // CHECK-LABEL: @div_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t div_inplace_i64(fixed_int64_t a, fixed_int64_t b) { @@ -1457,10 +1457,10 @@ // CHECK-LABEL: @div_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t div_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -1469,10 +1469,10 @@ // CHECK-LABEL: @div_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t div_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -1481,10 +1481,10 @@ // CHECK-LABEL: @div_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t div_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -1493,10 +1493,10 @@ // CHECK-LABEL: @div_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = udiv <8 
x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t div_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -1505,13 +1505,13 @@ // CHECK-LABEL: @div_inplace_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[DIV:%.*]] = fdiv <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[CONV3:%.*]] = fptrunc <32 x float> [[DIV]] to <32 x half> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t div_inplace_f16(fixed_float16_t a, fixed_float16_t b) { @@ -1520,10 +1520,10 @@ // CHECK-LABEL: @div_inplace_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <16 x float> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t div_inplace_f32(fixed_float32_t a, fixed_float32_t b) { @@ -1532,10 +1532,10 @@ // CHECK-LABEL: @div_inplace_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x double> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t div_inplace_f64(fixed_float64_t a, fixed_float64_t b) { @@ -1544,11 +1544,11 @@ // CHECK-LABEL: @div_scalar_i8( // CHECK-NEXT: 
entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t div_scalar_i8(fixed_int8_t a, int8_t b) { @@ -1557,11 +1557,11 @@ // CHECK-LABEL: @div_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t div_scalar_i16(fixed_int16_t a, int16_t b) { @@ -1570,11 +1570,11 @@ // CHECK-LABEL: @div_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t div_scalar_i32(fixed_int32_t a, int32_t b) { @@ -1583,11 +1583,11 @@ // CHECK-LABEL: @div_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t div_scalar_i64(fixed_int64_t a, int64_t b) { @@ -1596,11 +1596,11 @@ // CHECK-LABEL: @div_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t div_scalar_u8(fixed_uint8_t a, uint8_t b) { @@ -1609,11 +1609,11 @@ // CHECK-LABEL: @div_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t div_scalar_u16(fixed_uint16_t a, uint16_t b) { @@ -1622,11 +1622,11 @@ // CHECK-LABEL: @div_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = udiv <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t div_scalar_u32(fixed_uint32_t a, uint32_t b) { @@ -1635,11 +1635,11 @@ // CHECK-LABEL: @div_scalar_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] 
= udiv <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t div_scalar_u64(fixed_uint64_t a, uint64_t b) { @@ -1648,11 +1648,11 @@ // CHECK-LABEL: @div_scalar_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = fdiv <32 x half> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8f16.v32f16( undef, <32 x half> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float16_t div_scalar_f16(fixed_float16_t a, __fp16 b) { @@ -1661,11 +1661,11 @@ // CHECK-LABEL: @div_scalar_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = fdiv <16 x float> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4f32.v16f32( undef, <16 x float> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float32_t div_scalar_f32(fixed_float32_t a, float b) { @@ -1674,11 +1674,11 @@ // CHECK-LABEL: @div_scalar_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[DIV:%.*]] = fdiv <8 x double> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[DIV]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_float64_t div_scalar_f64(fixed_float64_t a, double b) { @@ -1689,10 +1689,10 @@ // CHECK-LABEL: @rem_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t rem_i8(fixed_int8_t a, fixed_int8_t b) { @@ -1701,10 +1701,10 @@ // CHECK-LABEL: @rem_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t rem_i16(fixed_int16_t a, fixed_int16_t b) { @@ -1713,10 +1713,10 @@ // CHECK-LABEL: @rem_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t rem_i32(fixed_int32_t a, fixed_int32_t b) { @@ -1725,10 +1725,10 @@ // CHECK-LABEL: @rem_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t rem_i64(fixed_int64_t a, fixed_int64_t b) { @@ -1737,10 +1737,10 @@ // CHECK-LABEL: @rem_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 
x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t rem_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -1749,10 +1749,10 @@ // CHECK-LABEL: @rem_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t rem_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -1761,10 +1761,10 @@ // CHECK-LABEL: @rem_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t rem_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -1773,10 +1773,10 @@ // CHECK-LABEL: @rem_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t 
rem_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -1785,10 +1785,10 @@ // CHECK-LABEL: @rem_inplace_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t rem_inplace_i8(fixed_int8_t a, fixed_int8_t b) { @@ -1797,10 +1797,10 @@ // CHECK-LABEL: @rem_inplace_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t rem_inplace_i16(fixed_int16_t a, fixed_int16_t b) { @@ -1809,10 +1809,10 @@ // CHECK-LABEL: @rem_inplace_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t rem_inplace_i32(fixed_int32_t a, fixed_int32_t b) { @@ -1821,10 +1821,10 @@ // CHECK-LABEL: @rem_inplace_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = srem <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> 
[[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t rem_inplace_i64(fixed_int64_t a, fixed_int64_t b) { @@ -1833,10 +1833,10 @@ // CHECK-LABEL: @rem_inplace_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t rem_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -1845,10 +1845,10 @@ // CHECK-LABEL: @rem_inplace_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t rem_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -1857,10 +1857,10 @@ // CHECK-LABEL: @rem_inplace_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t rem_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -1869,10 +1869,10 @@ // CHECK-LABEL: @rem_inplace_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> 
@llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[REM:%.*]] = urem <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t rem_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -1881,11 +1881,11 @@ // CHECK-LABEL: @rem_scalar_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = srem <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t rem_scalar_i8(fixed_int8_t a, int8_t b) { @@ -1894,11 +1894,11 @@ // CHECK-LABEL: @rem_scalar_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = srem <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t rem_scalar_i16(fixed_int16_t a, int16_t b) { @@ -1907,11 +1907,11 @@ // CHECK-LABEL: @rem_scalar_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = srem <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t rem_scalar_i32(fixed_int32_t a, int32_t b) { @@ -1920,11 +1920,11 @@ // CHECK-LABEL: @rem_scalar_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> 
@llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = srem <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t rem_scalar_i64(fixed_int64_t a, int64_t b) { @@ -1933,11 +1933,11 @@ // CHECK-LABEL: @rem_scalar_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = urem <64 x i8> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t rem_scalar_u8(fixed_uint8_t a, uint8_t b) { @@ -1946,11 +1946,11 @@ // CHECK-LABEL: @rem_scalar_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = urem <32 x i16> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t rem_scalar_u16(fixed_uint16_t a, uint16_t b) { @@ -1959,11 +1959,11 @@ // CHECK-LABEL: @rem_scalar_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[REM:%.*]] = urem <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[REM]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t rem_scalar_u32(fixed_uint32_t a, uint32_t b) { @@ -1972,11 +1972,11 @@ 
// CHECK-LABEL: @rem_scalar_u64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0
// CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT: [[REM:%.*]] = urem <8 x i64> [[A]], [[SPLAT_SPLAT]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint64_t rem_scalar_u64(fixed_uint64_t a, uint64_t b) {
diff --git a/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c b/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c
--- a/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c
+++ b/clang/test/CodeGen/aarch64-sve-vls-bitwise-ops.c
@@ -30,11 +30,11 @@
// CHECK-LABEL: @and_bool(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to
-// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0)
// CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to
-// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[AND]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to
// CHECK-NEXT: ret [[TMP2]]
//
@@ -44,10 +44,10 @@
// CHECK-LABEL: @and_i8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int8_t and_i8(fixed_int8_t a, fixed_int8_t b) {
@@ -56,10 +56,10 @@
// CHECK-LABEL: @and_i16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int16_t and_i16(fixed_int16_t a, fixed_int16_t b) {
@@ -68,10 +68,10 @@
// CHECK-LABEL: @and_i32(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int32_t and_i32(fixed_int32_t a, fixed_int32_t b) {
@@ -80,10 +80,10 @@
// CHECK-LABEL: @and_i64(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_int64_t and_i64(fixed_int64_t a, fixed_int64_t b) {
@@ -92,10 +92,10 @@
// CHECK-LABEL: @and_u8(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT: [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[AND]], i64 0)
// CHECK-NEXT: ret [[CASTSCALABLESVE]]
//
fixed_uint8_t and_u8(fixed_uint8_t a, fixed_uint8_t b) {
@@ -104,10 +104,10 @@
// CHECK-LABEL: @and_u16(
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
-// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t and_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -116,10 +116,10 @@ // CHECK-LABEL: @and_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t and_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -128,10 +128,10 @@ // CHECK-LABEL: @and_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[AND:%.*]] = and <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[AND]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t and_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -143,11 +143,11 @@ // CHECK-LABEL: @or_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <8 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[OR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[OR]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP2]] // @@ -157,10 +157,10 @@ // CHECK-LABEL: @or_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call 
<64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t or_i8(fixed_int8_t a, fixed_int8_t b) { @@ -169,10 +169,10 @@ // CHECK-LABEL: @or_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t or_i16(fixed_int16_t a, fixed_int16_t b) { @@ -181,10 +181,10 @@ // CHECK-LABEL: @or_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t or_i32(fixed_int32_t a, fixed_int32_t b) { @@ -193,10 +193,10 @@ // CHECK-LABEL: @or_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t or_i64(fixed_int64_t a, fixed_int64_t b) { @@ -205,10 +205,10 @@ // CHECK-LABEL: @or_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t or_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -217,10 +217,10 @@ // CHECK-LABEL: @or_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t or_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -229,10 +229,10 @@ // CHECK-LABEL: @or_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t or_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -241,10 +241,10 @@ // CHECK-LABEL: @or_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[OR:%.*]] = or <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[OR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t or_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -256,11 
+256,11 @@ // CHECK-LABEL: @xor_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <8 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[XOR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[XOR]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP2]] // @@ -270,10 +270,10 @@ // CHECK-LABEL: @xor_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t xor_i8(fixed_int8_t a, fixed_int8_t b) { @@ -282,10 +282,10 @@ // CHECK-LABEL: @xor_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t xor_i16(fixed_int16_t a, fixed_int16_t b) { @@ -294,10 +294,10 @@ // CHECK-LABEL: @xor_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = 
call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t xor_i32(fixed_int32_t a, fixed_int32_t b) { @@ -306,10 +306,10 @@ // CHECK-LABEL: @xor_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t xor_i64(fixed_int64_t a, fixed_int64_t b) { @@ -318,10 +318,10 @@ // CHECK-LABEL: @xor_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[XOR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t xor_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -330,10 +330,10 @@ // CHECK-LABEL: @xor_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[XOR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t xor_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -342,10 +342,10 @@ // CHECK-LABEL: @xor_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[XOR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t xor_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -354,10 +354,10 @@ // CHECK-LABEL: @xor_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[XOR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t xor_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -369,9 +369,9 @@ // CHECK-LABEL: @neg_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) // CHECK-NEXT: [[NEG:%.*]] = xor <8 x i8> [[A]], -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[NEG]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[NEG]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP1]] // @@ -381,9 +381,9 @@ // CHECK-LABEL: @neg_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]], -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t neg_i8(fixed_int8_t a) { @@ -392,9 +392,9 @@ // CHECK-LABEL: @neg_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]], -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t neg_i16(fixed_int16_t a) { @@ -403,9 +403,9 @@ // CHECK-LABEL: @neg_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 
0) // CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]], -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t neg_i32(fixed_int32_t a) { @@ -414,9 +414,9 @@ // CHECK-LABEL: @neg_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[NEG:%.*]] = xor <8 x i64> [[A]], -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t neg_i64(fixed_int64_t a) { @@ -425,9 +425,9 @@ // CHECK-LABEL: @neg_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[NEG:%.*]] = xor <64 x i8> [[A]], -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[NEG]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t neg_u8(fixed_uint8_t a) { @@ -436,9 +436,9 @@ // CHECK-LABEL: @neg_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[NEG:%.*]] = xor <32 x i16> [[A]], -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[NEG]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t neg_u16(fixed_uint16_t a) { @@ -447,9 +447,9 @@ // CHECK-LABEL: @neg_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[NEG:%.*]] = xor <16 x i32> [[A]], -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[NEG]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t neg_u32(fixed_uint32_t a) { @@ -458,9 +458,9 @@ // CHECK-LABEL: @neg_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[NEG:%.*]] = xor <8 x i64> [[A]], -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0) +// CHECK-NEXT: 
[[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[NEG]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t neg_u64(fixed_uint64_t a) { diff --git a/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c b/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c --- a/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-compare-ops.c @@ -30,12 +30,12 @@ // CHECK-LABEL: @eq_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP2]] // @@ -45,11 +45,11 @@ // CHECK-LABEL: @eq_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t eq_i8(fixed_int8_t a, fixed_int8_t b) { @@ -58,11 +58,11 @@ // CHECK-LABEL: @eq_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t eq_i16(fixed_int16_t a, fixed_int16_t b) { @@ -71,11 +71,11 @@ // CHECK-LABEL: @eq_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> 
@llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t eq_i32(fixed_int32_t a, fixed_int32_t b) { @@ -84,11 +84,11 @@ // CHECK-LABEL: @eq_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t eq_i64(fixed_int64_t a, fixed_int64_t b) { @@ -97,11 +97,11 @@ // CHECK-LABEL: @eq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t eq_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -110,11 +110,11 @@ // CHECK-LABEL: @eq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t eq_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -123,11 +123,11 @@ // CHECK-LABEL: @eq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t eq_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -136,11 +136,11 @@ // CHECK-LABEL: @eq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp eq <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t eq_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -149,14 +149,14 @@ // CHECK-LABEL: @eq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32> // CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t eq_f16(fixed_float16_t a, fixed_float16_t b) { @@ -165,11 +165,11 @@ // CHECK-LABEL: 
@eq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t eq_f32(fixed_float32_t a, fixed_float32_t b) { @@ -178,11 +178,11 @@ // CHECK-LABEL: @eq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t eq_f64(fixed_float64_t a, fixed_float64_t b) { @@ -194,12 +194,12 @@ // CHECK-LABEL: @neq_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP2]] // @@ -209,11 +209,11 @@ // CHECK-LABEL: @neq_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: 
[[CMP:%.*]] = icmp ne <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t neq_i8(fixed_int8_t a, fixed_int8_t b) { @@ -222,11 +222,11 @@ // CHECK-LABEL: @neq_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t neq_i16(fixed_int16_t a, fixed_int16_t b) { @@ -235,11 +235,11 @@ // CHECK-LABEL: @neq_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t neq_i32(fixed_int32_t a, fixed_int32_t b) { @@ -248,11 +248,11 @@ // CHECK-LABEL: @neq_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t neq_i64(fixed_int64_t a, fixed_int64_t b) { @@ -261,11 +261,11 @@ // CHECK-LABEL: @neq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> 
@llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t neq_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -274,11 +274,11 @@ // CHECK-LABEL: @neq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t neq_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -287,11 +287,11 @@ // CHECK-LABEL: @neq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t neq_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -300,11 +300,11 @@ // CHECK-LABEL: @neq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ne <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call 
@llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t neq_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -313,14 +313,14 @@ // CHECK-LABEL: @neq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp une <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32> // CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t neq_f16(fixed_float16_t a, fixed_float16_t b) { @@ -329,11 +329,11 @@ // CHECK-LABEL: @neq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp une <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t neq_f32(fixed_float32_t a, fixed_float32_t b) { @@ -342,11 +342,11 @@ // CHECK-LABEL: @neq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp une <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t neq_f64(fixed_float64_t a, fixed_float64_t b) { @@ -358,12 
+358,12 @@ // CHECK-LABEL: @lt_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP2]] // @@ -373,11 +373,11 @@ // CHECK-LABEL: @lt_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp slt <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t lt_i8(fixed_int8_t a, fixed_int8_t b) { @@ -386,11 +386,11 @@ // CHECK-LABEL: @lt_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp slt <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t lt_i16(fixed_int16_t a, fixed_int16_t b) { @@ -399,11 +399,11 @@ // CHECK-LABEL: @lt_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: 
[[CMP:%.*]] = icmp slt <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t lt_i32(fixed_int32_t a, fixed_int32_t b) { @@ -412,11 +412,11 @@ // CHECK-LABEL: @lt_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp slt <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t lt_i64(fixed_int64_t a, fixed_int64_t b) { @@ -425,11 +425,11 @@ // CHECK-LABEL: @lt_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t lt_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -438,11 +438,11 @@ // CHECK-LABEL: @lt_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t lt_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -451,11 +451,11 @@ // CHECK-LABEL: @lt_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> 
@llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t lt_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -464,11 +464,11 @@ // CHECK-LABEL: @lt_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ult <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t lt_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -477,14 +477,14 @@ // CHECK-LABEL: @lt_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32> // CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t lt_f16(fixed_float16_t a, fixed_float16_t b) { @@ -493,11 +493,11 @@ // CHECK-LABEL: @lt_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x 
float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t lt_f32(fixed_float32_t a, fixed_float32_t b) { @@ -506,11 +506,11 @@ // CHECK-LABEL: @lt_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t lt_f64(fixed_float64_t a, fixed_float64_t b) { @@ -522,12 +522,12 @@ // CHECK-LABEL: @leq_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ule <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP2]] // @@ -537,11 +537,11 @@ // CHECK-LABEL: @leq_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sle <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t 
leq_i8(fixed_int8_t a, fixed_int8_t b) { @@ -550,11 +550,11 @@ // CHECK-LABEL: @leq_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sle <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t leq_i16(fixed_int16_t a, fixed_int16_t b) { @@ -563,11 +563,11 @@ // CHECK-LABEL: @leq_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sle <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t leq_i32(fixed_int32_t a, fixed_int32_t b) { @@ -576,11 +576,11 @@ // CHECK-LABEL: @leq_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sle <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t leq_i64(fixed_int64_t a, fixed_int64_t b) { @@ -589,11 +589,11 @@ // CHECK-LABEL: @leq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: 
[[CMP:%.*]] = icmp ule <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t leq_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -602,11 +602,11 @@ // CHECK-LABEL: @leq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ule <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t leq_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -615,11 +615,11 @@ // CHECK-LABEL: @leq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ule <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t leq_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -628,11 +628,11 @@ // CHECK-LABEL: @leq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ule <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t leq_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -641,14 +641,14 @@ // CHECK-LABEL: @leq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> 
@llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp ole <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32> // CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t leq_f16(fixed_float16_t a, fixed_float16_t b) { @@ -657,11 +657,11 @@ // CHECK-LABEL: @leq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp ole <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t leq_f32(fixed_float32_t a, fixed_float32_t b) { @@ -670,11 +670,11 @@ // CHECK-LABEL: @leq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp ole <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t leq_f64(fixed_float64_t a, fixed_float64_t b) { @@ -686,12 +686,12 @@ // CHECK-LABEL: @gt_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call 
<8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP2]] // @@ -701,11 +701,11 @@ // CHECK-LABEL: @gt_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sgt <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t gt_i8(fixed_int8_t a, fixed_int8_t b) { @@ -714,11 +714,11 @@ // CHECK-LABEL: @gt_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sgt <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t gt_i16(fixed_int16_t a, fixed_int16_t b) { @@ -727,11 +727,11 @@ // CHECK-LABEL: @gt_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sgt <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t 
gt_i32(fixed_int32_t a, fixed_int32_t b) { @@ -740,11 +740,11 @@ // CHECK-LABEL: @gt_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sgt <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t gt_i64(fixed_int64_t a, fixed_int64_t b) { @@ -753,11 +753,11 @@ // CHECK-LABEL: @gt_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t gt_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -766,11 +766,11 @@ // CHECK-LABEL: @gt_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t gt_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -779,11 +779,11 @@ // CHECK-LABEL: @gt_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp 
ugt <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t gt_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -792,11 +792,11 @@ // CHECK-LABEL: @gt_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp ugt <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t gt_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -805,14 +805,14 @@ // CHECK-LABEL: @gt_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32> // CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t gt_f16(fixed_float16_t a, fixed_float16_t b) { @@ -821,11 +821,11 @@ // CHECK-LABEL: @gt_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], 
i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t gt_f32(fixed_float32_t a, fixed_float32_t b) { @@ -834,11 +834,11 @@ // CHECK-LABEL: @gt_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp ogt <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t gt_f64(fixed_float64_t a, fixed_float64_t b) { @@ -850,12 +850,12 @@ // CHECK-LABEL: @geq_bool( // CHECK-NEXT: entry: // CHECK-NEXT: [[A_COERCE:%.*]] = bitcast [[TMP0:%.*]] to -// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[A_COERCE]], i64 0) // CHECK-NEXT: [[B_COERCE:%.*]] = bitcast [[TMP1:%.*]] to -// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[B_COERCE]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <8 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[SEXT]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTSCALABLESVE]] to // CHECK-NEXT: ret [[TMP2]] // @@ -865,11 +865,11 @@ // CHECK-LABEL: @geq_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t geq_i8(fixed_int8_t a, fixed_int8_t b) { @@ -878,11 +878,11 @@ // CHECK-LABEL: @geq_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], 
i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t geq_i16(fixed_int16_t a, fixed_int16_t b) { @@ -891,11 +891,11 @@ // CHECK-LABEL: @geq_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t geq_i32(fixed_int32_t a, fixed_int32_t b) { @@ -904,11 +904,11 @@ // CHECK-LABEL: @geq_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp sge <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t geq_i64(fixed_int64_t a, fixed_int64_t b) { @@ -917,11 +917,11 @@ // CHECK-LABEL: @geq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <64 x i8> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <64 x i1> [[CMP]] to <64 x i8> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t geq_u8(fixed_uint8_t a, fixed_uint8_t b) { 
@@ -930,11 +930,11 @@ // CHECK-LABEL: @geq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <32 x i16> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t geq_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -943,11 +943,11 @@ // CHECK-LABEL: @geq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <16 x i32> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t geq_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -956,11 +956,11 @@ // CHECK-LABEL: @geq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = icmp uge <8 x i64> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t geq_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -969,14 +969,14 @@ // CHECK-LABEL: @geq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = fpext <32 x 
half> [[A]] to <32 x float> // CHECK-NEXT: [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float> // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <32 x float> [[CONV]], [[CONV2]] // CHECK-NEXT: [[SEXT:%.*]] = sext <32 x i1> [[CMP]] to <32 x i32> // CHECK-NEXT: [[CONV3:%.*]] = trunc <32 x i32> [[SEXT]] to <32 x i16> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[CONV3]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t geq_f16(fixed_float16_t a, fixed_float16_t b) { @@ -985,11 +985,11 @@ // CHECK-LABEL: @geq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <16 x float> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i32> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t geq_f32(fixed_float32_t a, fixed_float32_t b) { @@ -998,11 +998,11 @@ // CHECK-LABEL: @geq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CMP:%.*]] = fcmp oge <8 x double> [[A]], [[B]] // CHECK-NEXT: [[SEXT:%.*]] = sext <8 x i1> [[CMP]] to <8 x i64> -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SEXT]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t geq_f64(fixed_float64_t a, fixed_float64_t b) { diff --git a/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c b/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c --- a/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-shift-ops.c @@ -27,10 +27,10 @@ // CHECK-LABEL: @lshift_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( 
undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t lshift_i8(fixed_int8_t a, fixed_int8_t b) { @@ -39,10 +39,10 @@ // CHECK-LABEL: @rshift_i8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t rshift_i8(fixed_int8_t a, fixed_int8_t b) { @@ -51,10 +51,10 @@ // CHECK-LABEL: @lshift_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t lshift_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -63,10 +63,10 @@ // CHECK-LABEL: @rshift_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t rshift_u8(fixed_uint8_t a, fixed_uint8_t b) { @@ -75,10 +75,10 @@ // CHECK-LABEL: @lshift_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> 
[[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t lshift_i16(fixed_int16_t a, fixed_int16_t b) { @@ -87,10 +87,10 @@ // CHECK-LABEL: @rshift_i16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t rshift_i16(fixed_int16_t a, fixed_int16_t b) { @@ -99,10 +99,10 @@ // CHECK-LABEL: @lshift_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t lshift_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -111,10 +111,10 @@ // CHECK-LABEL: @rshift_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t rshift_u16(fixed_uint16_t a, fixed_uint16_t b) { @@ -123,10 +123,10 @@ // CHECK-LABEL: @lshift_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( 
[[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t lshift_i32(fixed_int32_t a, fixed_int32_t b) { @@ -135,10 +135,10 @@ // CHECK-LABEL: @rshift_i32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t rshift_i32(fixed_int32_t a, fixed_int32_t b) { @@ -147,10 +147,10 @@ // CHECK-LABEL: @lshift_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t lshift_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -159,10 +159,10 @@ // CHECK-LABEL: @rshift_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t rshift_u32(fixed_uint32_t a, fixed_uint32_t b) { @@ -171,10 +171,10 @@ // CHECK-LABEL: @lshift_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = 
call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t lshift_i64(fixed_int64_t a, fixed_int64_t b) { @@ -183,10 +183,10 @@ // CHECK-LABEL: @rshift_i64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t rshift_i64(fixed_int64_t a, fixed_int64_t b) { @@ -195,10 +195,10 @@ // CHECK-LABEL: @lshift_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t lshift_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -207,10 +207,10 @@ // CHECK-LABEL: @rshift_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) -// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[B_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[B]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t rshift_u64(fixed_uint64_t a, fixed_uint64_t b) { @@ -219,13 +219,13 @@ // CHECK-LABEL: @lshift_i8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: 
[[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8> // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t lshift_i8_rsplat(fixed_int8_t a, int8_t b) { @@ -234,11 +234,11 @@ // CHECK-LABEL: @lshift_i8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t lshift_i8_lsplat(fixed_int8_t a, int8_t b) { @@ -247,13 +247,13 @@ // CHECK-LABEL: @rshift_i8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8> // CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t rshift_i8_rsplat(fixed_int8_t a, int8_t b) { @@ -262,11 +262,11 @@ // CHECK-LABEL: @rshift_i8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <64 x i8> 
[[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int8_t rshift_i8_lsplat(fixed_int8_t a, int8_t b) { @@ -275,13 +275,13 @@ // CHECK-LABEL: @lshift_u8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8> // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t lshift_u8_rsplat(fixed_uint8_t a, uint8_t b) { @@ -290,11 +290,11 @@ // CHECK-LABEL: @lshift_u8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <64 x i8> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t lshift_u8_lsplat(fixed_uint8_t a, uint8_t b) { @@ -303,13 +303,13 @@ // CHECK-LABEL: @rshift_u8_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i8 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i32> [[SPLAT_SPLATINSERT]], <64 x i32> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <64 x i32> [[SPLAT_SPLAT]] to <64 x i8> // CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t rshift_u8_rsplat(fixed_uint8_t a, uint8_t b) { @@ -318,11 +318,11 @@ // CHECK-LABEL: @rshift_u8_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call 
<64 x i8> @llvm.experimental.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <64 x i8> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv16i8.v64i8( undef, <64 x i8> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint8_t rshift_u8_lsplat(fixed_uint8_t a, uint8_t b) { @@ -331,13 +331,13 @@ // CHECK-LABEL: @lshift_i16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16> // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t lshift_i16_rsplat(fixed_int16_t a, int16_t b) { @@ -346,11 +346,11 @@ // CHECK-LABEL: @lshift_i16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t lshift_i16_lsplat(fixed_int16_t a, int16_t b) { @@ -359,13 +359,13 @@ // CHECK-LABEL: @rshift_i16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16> // CHECK-NEXT: [[SHR:%.*]] = 
ashr <32 x i16> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t rshift_i16_rsplat(fixed_int16_t a, int16_t b) { @@ -374,11 +374,11 @@ // CHECK-LABEL: @rshift_i16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <32 x i16> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int16_t rshift_i16_lsplat(fixed_int16_t a, int16_t b) { @@ -387,13 +387,13 @@ // CHECK-LABEL: @lshift_u16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16> // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t lshift_u16_rsplat(fixed_uint16_t a, uint16_t b) { @@ -402,11 +402,11 @@ // CHECK-LABEL: @lshift_u16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <32 x i16> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t lshift_u16_lsplat(fixed_uint16_t a, uint16_t b) { @@ -415,13 +415,13 @@ // CHECK-LABEL: @rshift_u16_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( 
[[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B:%.*]] to i32 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i16> // CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[A]], [[SH_PROM]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t rshift_u16_rsplat(fixed_uint16_t a, uint16_t b) { @@ -430,11 +430,11 @@ // CHECK-LABEL: @rshift_u16_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <32 x i16> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8i16.v32i16( undef, <32 x i16> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint16_t rshift_u16_lsplat(fixed_uint16_t a, uint16_t b) { @@ -443,11 +443,11 @@ // CHECK-LABEL: @lshift_i32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t lshift_i32_rsplat(fixed_int32_t a, int32_t b) { @@ -456,11 +456,11 @@ // CHECK-LABEL: @lshift_i32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) +// 
CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t lshift_i32_lsplat(fixed_int32_t a, int32_t b) { @@ -469,11 +469,11 @@ // CHECK-LABEL: @rshift_i32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t rshift_i32_rsplat(fixed_int32_t a, int32_t b) { @@ -482,11 +482,11 @@ // CHECK-LABEL: @rshift_i32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i32> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t rshift_i32_lsplat(fixed_int32_t a, int32_t b) { @@ -495,11 +495,11 @@ // CHECK-LABEL: @lshift_u32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t lshift_u32_rsplat(fixed_uint32_t a, uint32_t b) { @@ -508,11 +508,11 @@ // CHECK-LABEL: @lshift_u32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> 
[[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i32> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t lshift_u32_lsplat(fixed_uint32_t a, uint32_t b) { @@ -521,11 +521,11 @@ // CHECK-LABEL: @rshift_u32_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t rshift_u32_rsplat(fixed_uint32_t a, uint32_t b) { @@ -534,11 +534,11 @@ // CHECK-LABEL: @rshift_u32_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <16 x i32> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint32_t rshift_u32_lsplat(fixed_uint32_t a, uint32_t b) { @@ -547,11 +547,11 @@ // CHECK-LABEL: @lshift_i64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t lshift_i64_rsplat(fixed_int64_t a, int64_t b) { @@ -560,11 +560,11 @@ // CHECK-LABEL: @lshift_i64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: 
[[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t lshift_i64_lsplat(fixed_int64_t a, int64_t b) { @@ -573,11 +573,11 @@ // CHECK-LABEL: @rshift_i64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t rshift_i64_rsplat(fixed_int64_t a, int64_t b) { @@ -586,11 +586,11 @@ // CHECK-LABEL: @rshift_i64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = ashr <8 x i64> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int64_t rshift_i64_lsplat(fixed_int64_t a, int64_t b) { @@ -599,11 +599,11 @@ // CHECK-LABEL: @lshift_u64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t lshift_u64_rsplat(fixed_uint64_t a, uint64_t b) { @@ -612,11 
+612,11 @@ // CHECK-LABEL: @lshift_u64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHL:%.*]] = shl <8 x i64> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHL]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t lshift_u64_lsplat(fixed_uint64_t a, uint64_t b) { @@ -625,11 +625,11 @@ // CHECK-LABEL: @rshift_u64_rsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[A]], [[SPLAT_SPLAT]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t rshift_u64_rsplat(fixed_uint64_t a, uint64_t b) { @@ -638,11 +638,11 @@ // CHECK-LABEL: @rshift_u64_lsplat( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i32 0 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer // CHECK-NEXT: [[SHR:%.*]] = lshr <8 x i64> [[SPLAT_SPLAT]], [[A]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[SHR]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_uint64_t rshift_u64_lsplat(fixed_uint64_t a, uint64_t b) { diff --git a/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c b/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c --- a/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c +++ b/clang/test/CodeGen/aarch64-sve-vls-subscript-ops.c @@ -28,7 +28,7 @@ // CHECK-LABEL: @subscript_int16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i16 [[VECEXT]] // @@ -38,7 +38,7 @@ // CHECK-LABEL: @subscript_uint16( // CHECK-NEXT: entry: -// CHECK-NEXT: 
[[A:%.*]] = call <32 x i16> @llvm.experimental.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x i16> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i16 [[VECEXT]] // @@ -48,7 +48,7 @@ // CHECK-LABEL: @subscript_int32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i32 [[VECEXT]] // @@ -58,7 +58,7 @@ // CHECK-LABEL: @subscript_uint32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x i32> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i32 [[VECEXT]] // @@ -68,7 +68,7 @@ // CHECK-LABEL: @subscript_int64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i64 [[VECEXT]] // @@ -78,7 +78,7 @@ // CHECK-LABEL: @subscript_uint64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x i64> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret i64 [[VECEXT]] // @@ -88,7 +88,7 @@ // CHECK-LABEL: @subscript_float16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.experimental.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <32 x half> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret half [[VECEXT]] // @@ -98,7 +98,7 @@ // CHECK-LABEL: @subscript_float32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <16 x float> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret float [[VECEXT]] // @@ -108,7 +108,7 @@ // CHECK-LABEL: @subscript_float64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[A_COERCE:%.*]], i64 0) // CHECK-NEXT: [[VECEXT:%.*]] = extractelement <8 x double> [[A]], i64 [[B:%.*]] // CHECK-NEXT: ret double [[VECEXT]] // diff --git a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_dup_neonq.c b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_dup_neonq.c --- a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_dup_neonq.c +++ 
b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_dup_neonq.c @@ -16,13 +16,13 @@ // CHECK-LABEL: @test_svdup_neonq_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z19test_svdup_neonq_s811__Int8x16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -32,13 +32,13 @@ // CHECK-LABEL: @test_svdup_neonq_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_s1611__Int16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -48,13 +48,13 @@ // CHECK-LABEL: @test_svdup_neonq_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_s3211__Int32x4_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -64,13 +64,13 @@ // CHECK-LABEL: @test_svdup_neonq_s64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_s6411__Int64x2_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP0]], i64 0) // 
CPP-CHECK-NEXT: ret [[TMP1]] // @@ -80,13 +80,13 @@ // CHECK-LABEL: @test_svdup_neonq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z19test_svdup_neonq_u812__Uint8x16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -96,13 +96,13 @@ // CHECK-LABEL: @test_svdup_neonq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_u1612__Uint16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -112,13 +112,13 @@ // CHECK-LABEL: @test_svdup_neonq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_u3212__Uint32x4_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -128,13 +128,13 @@ // CHECK-LABEL: @test_svdup_neonq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_u6412__Uint64x2_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -144,13 +144,13 @@ // 
CHECK-LABEL: @test_svdup_neonq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8f16( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_f1613__Float16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( undef, <8 x half> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8f16( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -158,18 +158,18 @@ return SVE_ACLE_FUNC(svdup_neonq, _f16, , )(n); } -// CHECK-NEXT %0 = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> %n, i64 0) +// CHECK-NEXT %0 = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> %n, i64 0) // CHECK-NEXT %1 = call @llvm.aarch64.sve.dupq.lane.nxv4f32( %0, i64 0) // CHECK-NEXT ret %1 // CHECK-LABEL: @test_svdup_neonq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4f32( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_f3213__Float32x4_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4f32( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -179,13 +179,13 @@ // CHECK-LABEL: @test_svdup_neonq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2f64( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z20test_svdup_neonq_f6413__Float64x2_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2f64( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // @@ -195,13 +195,13 @@ // CHECK-LABEL: @test_svdup_neonq_bf16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[N:%.*]], i64 0) // CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8bf16( [[TMP0]], i64 0) // CHECK-NEXT: ret [[TMP1]] // // CPP-CHECK-LABEL: @_Z21test_svdup_neonq_bf1614__Bfloat16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8bf16( [[TMP0]], i64 0) // CPP-CHECK-NEXT: ret [[TMP1]] // diff --git a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c --- a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c +++ b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_get_neonq.c @@ -16,12 +16,12 @@ // CHECK-LABEL: @test_svget_neonq_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) // CHECK-NEXT: ret <16 x i8> [[TMP0]] // // CPP-CHECK-LABEL: @_Z19test_svget_neonq_s8u10__SVInt8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <16 x i8> [[TMP0]] // int8x16_t test_svget_neonq_s8(svint8_t n) { @@ -31,12 +31,12 @@ // // CHECK-LABEL: @test_svget_neonq_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) // CHECK-NEXT: ret <8 x i16> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_s16u11__SVInt16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <8 x i16> [[TMP0]] // int16x8_t test_svget_neonq_s16(svint16_t n) { @@ -45,12 +45,12 @@ // CHECK-LABEL: @test_svget_neonq_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) // CHECK-NEXT: ret <4 x i32> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_s32u11__SVInt32_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <4 x i32> [[TMP0]] // int32x4_t test_svget_neonq_s32(svint32_t n) { @@ -59,12 +59,12 @@ // CHECK-LABEL: @test_svget_neonq_s64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <2 x i64> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_s64u11__SVInt64_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) // 
CPP-CHECK-NEXT: ret <2 x i64> [[TMP0]] // int64x2_t test_svget_neonq_s64(svint64_t n) { @@ -73,12 +73,12 @@ // CHECK-LABEL: @test_svget_neonq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) // CHECK-NEXT: ret <16 x i8> [[TMP0]] // // CPP-CHECK-LABEL: @_Z19test_svget_neonq_u8u11__SVUint8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <16 x i8> [[TMP0]] // uint8x16_t test_svget_neonq_u8(svuint8_t n) { @@ -87,12 +87,12 @@ // CHECK-LABEL: @test_svget_neonq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) // CHECK-NEXT: ret <8 x i16> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_u16u12__SVUint16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <8 x i16> [[TMP0]] // uint16x8_t test_svget_neonq_u16(svuint16_t n) { @@ -101,12 +101,12 @@ // CHECK-LABEL: @test_svget_neonq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) // CHECK-NEXT: ret <4 x i32> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_u32u12__SVUint32_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <4 x i32> [[TMP0]] // uint32x4_t test_svget_neonq_u32(svuint32_t n) { @@ -115,12 +115,12 @@ // CHECK-LABEL: @test_svget_neonq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <2 x i64> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_u64u12__SVUint64_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <2 x i64> [[TMP0]] // uint64x2_t test_svget_neonq_u64(svuint64_t n) { @@ -129,12 +129,12 @@ // CHECK-LABEL: @test_svget_neonq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.experimental.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0) // CHECK-NEXT: ret <8 x half> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_f16u13__SVFloat16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> 
@llvm.experimental.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.vector.extract.v8f16.nxv8f16( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <8 x half> [[TMP0]] // float16x8_t test_svget_neonq_f16(svfloat16_t n) { @@ -143,12 +143,12 @@ // CHECK-LABEL: @test_svget_neonq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0) // CHECK-NEXT: ret <4 x float> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_f32u13__SVFloat32_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.vector.extract.v4f32.nxv4f32( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <4 x float> [[TMP0]] // float32x4_t test_svget_neonq_f32(svfloat32_t n) { @@ -157,12 +157,12 @@ // CHECK-LABEL: @test_svget_neonq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0) // CHECK-NEXT: ret <2 x double> [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svget_neonq_f64u13__SVFloat64_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <2 x double> [[TMP0]] // float64x2_t test_svget_neonq_f64(svfloat64_t n) { @@ -171,12 +171,12 @@ // CHECK-LABEL: @test_svget_neonq_bf16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0) // CHECK-NEXT: ret <8 x bfloat> [[TMP0]] // // CPP-CHECK-LABEL: @_Z21test_svget_neonq_bf16u14__SVBFloat16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16( [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret <8 x bfloat> [[TMP0]] // bfloat16x8_t test_svget_neonq_bf16(svbfloat16_t n) { diff --git a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_set_neonq.c b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_set_neonq.c --- a/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_set_neonq.c +++ b/clang/test/CodeGen/aarch64_neon_sve_bridge_intrinsics/acle_neon_sve_bridge_set_neonq.c @@ -16,12 +16,12 @@ // CHECK-LABEL: @test_svset_neonq_s8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z19test_svset_neonq_s8u10__SVInt8_t11__Int8x16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svint8_t test_svset_neonq_s8(svint8_t s, int8x16_t n) { @@ -30,12 +30,12 @@ // CHECK-LABEL: @test_svset_neonq_s16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svset_neonq_s16u11__SVInt16_t11__Int16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svint16_t test_svset_neonq_s16(svint16_t s, int16x8_t n) { @@ -44,12 +44,12 @@ // CHECK-LABEL: @test_svset_neonq_s32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svset_neonq_s32u11__SVInt32_t11__Int32x4_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svint32_t test_svset_neonq_s32(svint32_t s, int32x4_t n) { @@ -58,12 +58,12 @@ // CHECK-LABEL: @test_svset_neonq_s64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svset_neonq_s64u11__SVInt64_t11__Int64x2_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svint64_t test_svset_neonq_s64(svint64_t s, int64x2_t n) { @@ -72,12 +72,12 @@ // CHECK-LABEL: @test_svset_neonq_u8( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z19test_svset_neonq_u8u11__SVUint8_t12__Uint8x16_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv16i8.v16i8( [[S:%.*]], <16 x i8> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svuint8_t test_svset_neonq_u8(svuint8_t s, uint8x16_t n) { @@ -86,12 +86,12 @@ // CHECK-LABEL: @test_svset_neonq_u16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svset_neonq_u16u12__SVUint16_t12__Uint16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8i16.v8i16( [[S:%.*]], <8 x i16> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svuint16_t test_svset_neonq_u16(svuint16_t s, uint16x8_t n) { @@ -100,12 +100,12 @@ // CHECK-LABEL: @test_svset_neonq_u32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svset_neonq_u32u12__SVUint32_t12__Uint32x4_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( [[S:%.*]], <4 x i32> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svuint32_t test_svset_neonq_u32(svuint32_t s, uint32x4_t n) { @@ -114,12 +114,12 @@ // CHECK-LABEL: @test_svset_neonq_u64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svset_neonq_u64u12__SVUint64_t12__Uint64x2_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( [[S:%.*]], <2 x i64> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svuint64_t test_svset_neonq_u64(svuint64_t s, uint64x2_t n) { @@ -128,12 +128,12 @@ // CHECK-LABEL: @test_svset_neonq_f16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( [[S:%.*]], <8 x half> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( [[S:%.*]], <8 x half> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svset_neonq_f16u13__SVFloat16_t13__Float16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v8f16( [[S:%.*]], <8 x half> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8f16.v8f16( [[S:%.*]], <8 x half> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svfloat16_t test_svset_neonq_f16(svfloat16_t s, float16x8_t n) { @@ -142,12 +142,12 @@ // CHECK-LABEL: @test_svset_neonq_f32( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( [[S:%.*]], <4 x float> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv4f32.v4f32( [[S:%.*]], <4 x float> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svset_neonq_f32u13__SVFloat32_t13__Float32x4_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v4f32( [[S:%.*]], <4 x float> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.insert.nxv4f32.v4f32( [[S:%.*]], <4 x float> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svfloat32_t test_svset_neonq_f32(svfloat32_t s, float32x4_t n) { @@ -156,12 +156,12 @@ // CHECK-LABEL: @test_svset_neonq_f64( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( [[S:%.*]], <2 x double> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( [[S:%.*]], <2 x double> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z20test_svset_neonq_f64u13__SVFloat64_t13__Float64x2_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( [[S:%.*]], <2 x double> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( [[S:%.*]], <2 x double> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svfloat64_t test_svset_neonq_f64(svfloat64_t s, float64x2_t n) { @@ -170,12 +170,12 @@ // CHECK-LABEL: @test_svset_neonq_bf16( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0) +// CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0) // CHECK-NEXT: ret [[TMP0]] // // CPP-CHECK-LABEL: @_Z21test_svset_neonq_bf16u14__SVBFloat16_t14__Bfloat16x8_t( // CPP-CHECK-NEXT: entry: -// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0) +// CPP-CHECK-NEXT: [[TMP0:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( [[S:%.*]], <8 x bfloat> [[N:%.*]], i64 0) // CPP-CHECK-NEXT: ret [[TMP0]] // svbfloat16_t test_svset_neonq_bf16(svbfloat16_t s, bfloat16x8_t n) { diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c @@ -32,21 +32,21 @@ // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]] -// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP0]], i64 0) +// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP0]], i64 0) // CHECK-128-NEXT: ret [[CASTSCALABLESVE]] // // CHECK-256-LABEL: @read_int64( // CHECK-256-NEXT: entry: // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]] -// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP0]], i64 0) +// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP0]], i64 0) // CHECK-256-NEXT: ret [[CASTSCALABLESVE]] // // CHECK-512-LABEL: @read_int64( // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[ARRAYIDX]], align 16, !tbaa 
[[TBAA6:![0-9]+]] -// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[TMP0]], i64 0) // CHECK-512-NEXT: ret [[CASTSCALABLESVE]] // svint64_t read_int64(struct struct_int64 *s) { @@ -55,21 +55,21 @@ // CHECK-128-LABEL: @write_int64( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[X:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[X:%.*]], i64 0) // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-256-LABEL: @write_int64( // CHECK-256-NEXT: entry: -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[X:%.*]], i64 0) +// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[X:%.*]], i64 0) // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: store <4 x i64> [[CASTFIXEDSVE]], <4 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-256-NEXT: ret void // // CHECK-512-LABEL: @write_int64( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64( [[X:%.*]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[X:%.*]], i64 0) // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void @@ -86,21 +86,21 @@ // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x double>, <2 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] -// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP0]], i64 0) +// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> [[TMP0]], i64 0) // CHECK-128-NEXT: ret [[CASTSCALABLESVE]] // // CHECK-256-LABEL: @read_float64( // CHECK-256-NEXT: entry: // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x double>, <4 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] -// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP0]], i64 0) +// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP0]], i64 0) // CHECK-256-NEXT: ret [[CASTSCALABLESVE]] // // CHECK-512-LABEL: @read_float64( // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0 // 
CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x double>, <8 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] -// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> [[TMP0]], i64 0) // CHECK-512-NEXT: ret [[CASTSCALABLESVE]] // svfloat64_t read_float64(struct struct_float64 *s) { @@ -109,21 +109,21 @@ // CHECK-128-LABEL: @write_float64( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( [[X:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x double> @llvm.vector.extract.v2f64.nxv2f64( [[X:%.*]], i64 0) // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: store <2 x double> [[CASTFIXEDSVE]], <2 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-256-LABEL: @write_float64( // CHECK-256-NEXT: entry: -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[X:%.*]], i64 0) +// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64( [[X:%.*]], i64 0) // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: store <4 x double> [[CASTFIXEDSVE]], <4 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-256-NEXT: ret void // // CHECK-512-LABEL: @write_float64( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64( [[X:%.*]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64( [[X:%.*]], i64 0) // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void @@ -140,21 +140,21 @@ // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] -// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP0]], i64 0) +// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP0]], i64 0) // CHECK-128-NEXT: ret [[CASTSCALABLESVE]] // // CHECK-256-LABEL: @read_bfloat16( // CHECK-256-NEXT: entry: // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: [[TMP0:%.*]] = load <16 x bfloat>, <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] -// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v16bf16( undef, <16 x bfloat> [[TMP0]], i64 0) +// CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v16bf16( undef, <16 x bfloat> [[TMP0]], i64 0) // CHECK-256-NEXT: ret [[CASTSCALABLESVE]] // // CHECK-512-LABEL: @read_bfloat16( // 
CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] -// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v32bf16( undef, <32 x bfloat> [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v32bf16( undef, <32 x bfloat> [[TMP0]], i64 0) // CHECK-512-NEXT: ret [[CASTSCALABLESVE]] // svbfloat16_t read_bfloat16(struct struct_bfloat16 *s) { @@ -163,21 +163,21 @@ // CHECK-128-LABEL: @write_bfloat16( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[X:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16( [[X:%.*]], i64 0) // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-256-LABEL: @write_bfloat16( // CHECK-256-NEXT: entry: -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16( [[X:%.*]], i64 0) +// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x bfloat> @llvm.vector.extract.v16bf16.nxv8bf16( [[X:%.*]], i64 0) // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: store <16 x bfloat> [[CASTFIXEDSVE]], <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-256-NEXT: ret void // // CHECK-512-LABEL: @write_bfloat16( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16( [[X:%.*]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.vector.extract.v32bf16.nxv8bf16( [[X:%.*]], i64 0) // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void @@ -194,7 +194,7 @@ // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0) // CHECK-128-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to // CHECK-128-NEXT: ret [[TMP1]] // @@ -202,7 +202,7 @@ // CHECK-256-NEXT: entry: // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v4i8( undef, <4 x i8> [[TMP0]], i64 0) +// CHECK-256-NEXT: 
[[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v4i8( undef, <4 x i8> [[TMP0]], i64 0) // CHECK-256-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to // CHECK-256-NEXT: ret [[TMP1]] // @@ -210,7 +210,7 @@ // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0) // CHECK-512-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to // CHECK-512-NEXT: ret [[TMP1]] // @@ -221,7 +221,7 @@ // CHECK-128-LABEL: @write_bool( // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast %x to -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0) // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void @@ -229,7 +229,7 @@ // CHECK-256-LABEL: @write_bool( // CHECK-256-NEXT: entry: // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast %x to -// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8( [[TMP0]], i64 0) +// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8( [[TMP0]], i64 0) // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-256-NEXT: store <4 x i8> [[CASTFIXEDSVE]], <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] // CHECK-256-NEXT: ret void @@ -237,7 +237,7 @@ // CHECK-512-LABEL: @write_bool( // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast %x to -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0) // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0 // CHECK-512-NEXT: store <8 x i8> [[CASTFIXEDSVE]], <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c @@ -45,7 +45,7 @@ // CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[COERCE1]] to * // CHECK-NEXT: store [[X:%.*]], * [[TMP0]], align 16 // CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, !tbaa [[TBAA6:![0-9]+]] -// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP1]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE2:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP1]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE2]] // svint32_t sizeless_caller(svint32_t x) { diff --git 
a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c @@ -63,7 +63,7 @@ // CHECK-LABEL: @lax_cast( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = alloca <16 x i32>, align 64 -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0) // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[TMP0:%.*]], align 64, !tbaa [[TBAA6:![0-9]+]] // CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[TMP0]] to * // CHECK-NEXT: [[TMP2:%.*]] = load , * [[TMP1]], align 64, !tbaa [[TBAA6]] @@ -76,7 +76,7 @@ // CHECK-LABEL: @to_svint32_t__from_gnu_int32_t( // CHECK-NEXT: entry: // CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6:![0-9]+]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TYPE]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TYPE]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) { @@ -85,7 +85,7 @@ // CHECK-LABEL: @from_svint32_t__to_gnu_int32_t( // CHECK-NEXT: entry: -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TYPE:%.*]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TYPE:%.*]], i64 0) // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]] // CHECK-NEXT: ret void // @@ -96,7 +96,7 @@ // CHECK-LABEL: @to_fixed_int32_t__from_gnu_int32_t( // CHECK-NEXT: entry: // CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]] -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TYPE]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TYPE]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t to_fixed_int32_t__from_gnu_int32_t(gnu_int32_t type) { @@ -105,7 +105,7 @@ // CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0) +// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TYPE_COERCE:%.*]], i64 0) // CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]] // CHECK-NEXT: ret void // diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c @@ -24,23 +24,23 @@ // CHECK-NEXT: store [[VEC:%.*]], * [[VEC_ADDR]], align 16 // CHECK-NEXT: [[TMP0:%.*]] = load , * [[PRED_ADDR]], align 2 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2 -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> 
[[TMP1]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTFIXEDSVE]] to // CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2 -// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP3]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP3]], i64 0) // CHECK-NEXT: [[TMP4:%.*]] = bitcast [[CASTFIXEDSVE2]] to // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.aarch64.sve.and.z.nxv16i1( [[TMP0]], [[TMP2]], [[TMP4]]) // CHECK-NEXT: store [[TMP5]], * [[PG]], align 2 // CHECK-NEXT: [[TMP6:%.*]] = load , * [[PG]], align 2 // CHECK-NEXT: [[TMP7:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16 -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP7]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP7]], i64 0) // CHECK-NEXT: [[TMP8:%.*]] = load , * [[VEC_ADDR]], align 16 // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[TMP6]]) // CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.add.nxv4i32( [[TMP9]], [[CASTSCALABLESVE]], [[TMP8]]) -// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TMP10]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TMP10]], i64 0) // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16 // CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16 -// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP11]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP11]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE4]] // fixed_int32_t foo(svbool_t pred, svint32_t vec) { @@ -57,7 +57,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16 // CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16 // CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16 -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP2]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP2]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t test_ptr_to_global() { @@ -78,7 +78,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[ARRAYIDX]], align 16 // CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL]], align 16 // CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16 -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP2]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP2]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE]] // fixed_int32_t array_arg(fixed_int32_t arr[]) { @@ -96,7 +96,7 @@ // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 2 // CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL]], align 2 // CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, <8 x i8>* [[RETVAL]], align 2 -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP2]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = 
call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP2]], i64 0) // CHECK-NEXT: [[TMP3:%.*]] = bitcast [[CASTFIXEDSVE]] to // CHECK-NEXT: ret [[TMP3]] // @@ -121,25 +121,25 @@ // CHECK-NEXT: store <8 x i8> , <8 x i8>* [[YY]], align 8 // CHECK-NEXT: [[TMP0:%.*]] = load , * [[PRED_ADDR]], align 2 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2 -// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP1]], i64 0) // CHECK-NEXT: [[TMP2:%.*]] = bitcast [[CASTFIXEDSVE]] to // CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[XX]], align 8 // CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[YY]], align 8 // CHECK-NEXT: [[ADD:%.*]] = add <8 x i8> [[TMP3]], [[TMP4]] -// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[ADD]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[ADD]], i64 0) // CHECK-NEXT: [[TMP5:%.*]] = bitcast [[CASTFIXEDSVE2]] to // CHECK-NEXT: [[TMP6:%.*]] = call @llvm.aarch64.sve.and.z.nxv16i1( [[TMP0]], [[TMP2]], [[TMP5]]) // CHECK-NEXT: store [[TMP6]], * [[PG]], align 2 // CHECK-NEXT: [[TMP7:%.*]] = load , * [[PG]], align 2 // CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16 -// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP8]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP8]], i64 0) // CHECK-NEXT: [[TMP9:%.*]] = load , * [[VEC_ADDR]], align 16 // CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[TMP7]]) // CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.add.nxv4i32( [[TMP10]], [[CASTSCALABLESVE]], [[TMP9]]) -// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( [[TMP11]], i64 0) +// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( [[TMP11]], i64 0) // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16 // CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16 -// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP12]], i64 0) +// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call @llvm.vector.insert.nxv4i32.v16i32( undef, <16 x i32> [[TMP12]], i64 0) // CHECK-NEXT: ret [[CASTSCALABLESVE4]] // fixed_int32_t test_cast(svbool_t pred, svint32_t vec) { diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c @@ -22,13 +22,13 @@ // CHECK-128-LABEL: @write_global_i64( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( [[V:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( [[V:%.*]], i64 0) // CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]] // CHECK-128-NEXT: ret void // // CHECK-512-LABEL: @write_global_i64( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> 
@llvm.experimental.vector.extract.v8i64.nxv2i64( [[V:%.*]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64( [[V:%.*]], i64 0) // CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]] // CHECK-512-NEXT: ret void // @@ -36,13 +36,13 @@ // CHECK-128-LABEL: @write_global_bf16( // CHECK-128-NEXT: entry: -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16( [[V:%.*]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16( [[V:%.*]], i64 0) // CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]] // CHECK-128-NEXT: ret void // // CHECK-512-LABEL: @write_global_bf16( // CHECK-512-NEXT: entry: -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16( [[V:%.*]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.vector.extract.v32bf16.nxv8bf16( [[V:%.*]], i64 0) // CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void // @@ -51,14 +51,14 @@ // CHECK-128-LABEL: @write_global_bool( // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast [[V:%.*]] to -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( [[TMP0]], i64 0) // CHECK-128-NEXT: store <2 x i8> [[CASTFIXEDSVE]], <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6:![0-9]+]] // CHECK-128-NEXT: ret void // // CHECK-512-LABEL: @write_global_bool( // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast [[V:%.*]] to -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( [[TMP0]], i64 0) // CHECK-512-NEXT: store <8 x i8> [[CASTFIXEDSVE]], <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]] // CHECK-512-NEXT: ret void // @@ -71,13 +71,13 @@ // CHECK-128-LABEL: @read_global_i64( // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* @global_i64, align 16, !tbaa [[TBAA6]] -// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP0]], i64 0) +// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP0]], i64 0) // CHECK-128-NEXT: ret [[CASTSCALABLESVE]] // // CHECK-512-LABEL: @read_global_i64( // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* @global_i64, align 16, !tbaa [[TBAA6]] -// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv2i64.v8i64( undef, <8 x i64> [[TMP0]], i64 0) // CHECK-512-NEXT: ret [[CASTSCALABLESVE]] // svint64_t read_global_i64() { return global_i64; } @@ -85,13 +85,13 @@ // CHECK-128-LABEL: @read_global_bf16( // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]] -// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x 
bfloat> [[TMP0]], i64 0) +// CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> [[TMP0]], i64 0) // CHECK-128-NEXT: ret [[CASTSCALABLESVE]] // // CHECK-512-LABEL: @read_global_bf16( // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]] -// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v32bf16( undef, <32 x bfloat> [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call @llvm.vector.insert.nxv8bf16.v32bf16( undef, <32 x bfloat> [[TMP0]], i64 0) // CHECK-512-NEXT: ret [[CASTSCALABLESVE]] // svbfloat16_t read_global_bf16() { return global_bf16; } @@ -99,14 +99,14 @@ // CHECK-128-LABEL: @read_global_bool( // CHECK-128-NEXT: entry: // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]] -// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0) +// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v2i8( undef, <2 x i8> [[TMP0]], i64 0) // CHECK-128-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to // CHECK-128-NEXT: ret [[TMP1]] // // CHECK-512-LABEL: @read_global_bool( // CHECK-512-NEXT: entry: // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]] -// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0) +// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> [[TMP0]], i64 0) // CHECK-512-NEXT: [[TMP1:%.*]] = bitcast [[CASTFIXEDSVE]] to // CHECK-512-NEXT: ret [[TMP1]] // diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -17268,27 +17268,35 @@ """""""""" The argument to this intrinsic must be a vector of floating-point values. -'``llvm.experimental.vector.insert``' Intrinsic -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +'``llvm.vector.insert``' Intrinsic +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Syntax: """"""" -This is an overloaded intrinsic. You can use ``llvm.experimental.vector.insert`` -to insert a fixed-width vector into a scalable vector, but not the other way -around. +This is an overloaded intrinsic. :: - declare @llvm.experimental.vector.insert.nxv4f32.v4f32( %vec, <4 x float> %subvec, i64 %idx) - declare @llvm.experimental.vector.insert.nxv2f64.v2f64( %vec, <2 x double> %subvec, i64 %idx) + ; Insert fixed type into scalable type + declare @llvm.vector.insert.nxv4f32.v4f32( %vec, <4 x float> %subvec, i64 ) + declare @llvm.vector.insert.nxv2f64.v2f64( %vec, <2 x double> %subvec, i64 ) + + ; Insert scalable type into scalable type + declare @llvm.vector.insert.nxv4f64.nxv2f64( %vec, %subvec, i64 ) + + ; Insert fixed type into fixed type + declare <4 x double> @llvm.vector.insert.v4f64.v2f64(<4 x double> %vec, <2 x double> %subvec, i64 ) Overview: """"""""" -The '``llvm.experimental.vector.insert.*``' intrinsics insert a vector into another vector +The '``llvm.vector.insert.*``' intrinsics insert a vector into another vector starting from a given index. The return type matches the type of the vector we insert into. Conceptually, this can be used to build a scalable vector out of -non-scalable vectors. +non-scalable vectors, however this intrinsic can also be used on purely fixed +types. + +Scalable vectors can only be inserted into other scalable vectors. 
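As an illustrative aside (not part of this patch: the module, the function name ``demo`` and its argument are invented for the example), the Overview above is easiest to see from the builder side. The sketch below uses ``IRBuilder::CreateInsertVector``, the helper retargeted later in this diff, so it now materializes a call to ``@llvm.vector.insert.nxv4f32.v4f32`` that places a fixed-width value at index 0 of an undef scalable vector::

  // Standalone demo (assumed names); printing the module shows a single
  // call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(
  //          <vscale x 4 x float> undef, <4 x float> %subvec, i64 0)
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  int main() {
    LLVMContext Ctx;
    Module M("vector-insert-demo", Ctx);

    // <vscale x 4 x float> and <4 x float>, matching the declarations above.
    auto *ScalableTy = ScalableVectorType::get(Type::getFloatTy(Ctx), 4);
    auto *FixedTy = FixedVectorType::get(Type::getFloatTy(Ctx), 4);

    // define <vscale x 4 x float> @demo(<4 x float> %subvec)
    auto *FnTy = FunctionType::get(ScalableTy, {FixedTy}, /*isVarArg=*/false);
    Function *F = Function::Create(FnTy, Function::ExternalLinkage, "demo", &M);
    IRBuilder<> Builder(BasicBlock::Create(Ctx, "entry", F));

    // Insert the fixed-width argument at index 0 of an undef scalable vector.
    Value *Widened = Builder.CreateInsertVector(
        ScalableTy, UndefValue::get(ScalableTy), F->getArg(0),
        Builder.getInt64(0), "cast.scalable");
    Builder.CreateRet(Widened);

    M.print(outs(), /*AAW=*/nullptr);
    return 0;
  }

Running it prints a one-function module whose body is exactly the ``llvm.vector.insert`` call quoted in the leading comment.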
Arguments: """""""""" @@ -17306,27 +17314,35 @@ is undefined. -'``llvm.experimental.vector.extract``' Intrinsic -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +'``llvm.vector.extract``' Intrinsic +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Syntax: """"""" -This is an overloaded intrinsic. You can use -``llvm.experimental.vector.extract`` to extract a fixed-width vector from a -scalable vector, but not the other way around. +This is an overloaded intrinsic. :: - declare <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32( %vec, i64 %idx) - declare <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64( %vec, i64 %idx) + ; Extract fixed type from scalable type + declare <4 x float> @llvm.vector.extract.v4f32.nxv4f32( %vec, i64 ) + declare <2 x double> @llvm.vector.extract.v2f64.nxv2f64( %vec, i64 ) + + ; Extract scalable type from scalable type + declare @llvm.vector.extract.nxv2f32.nxv4f32( %vec, i64 ) + + ; Extract fixed type from fixed type + declare <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double> %vec, i64 ) Overview: """"""""" -The '``llvm.experimental.vector.extract.*``' intrinsics extract a vector from -within another vector starting from a given index. The return type must be -explicitly specified. Conceptually, this can be used to decompose a scalable -vector into non-scalable parts. +The '``llvm.vector.extract.*``' intrinsics extract a vector from within another +vector starting from a given index. The return type must be explicitly +specified. Conceptually, this can be used to decompose a scalable vector into +non-scalable parts, however this intrinsic can also be used on purely fixed +types. + +Scalable vectors can only be extracted from other scalable vectors. Arguments: """""""""" diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst --- a/llvm/docs/ReleaseNotes.rst +++ b/llvm/docs/ReleaseNotes.rst @@ -66,6 +66,9 @@ Changes to the LLVM IR ---------------------- +* Renamed ``llvm.experimental.vector.extract`` intrinsic to ``llvm.vector.extract``. +* Renamed ``llvm.experimental.vector.insert`` intrinsic to ``llvm.vector.insert``. + Changes to building LLVM ------------------------ diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -1474,7 +1474,7 @@ // The cost of materialising a constant integer vector. return TargetTransformInfo::TCC_Basic; } - case Intrinsic::experimental_vector_extract: { + case Intrinsic::vector_extract: { // FIXME: Handle case where a scalable vector is extracted from a scalable // vector if (isa(RetTy)) @@ -1484,7 +1484,7 @@ cast(Args[0]->getType()), None, Index, cast(RetTy)); } - case Intrinsic::experimental_vector_insert: { + case Intrinsic::vector_insert: { // FIXME: Handle case where a scalable vector is inserted into a scalable // vector if (isa(Args[1]->getType())) diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h --- a/llvm/include/llvm/IR/IRBuilder.h +++ b/llvm/include/llvm/IR/IRBuilder.h @@ -914,18 +914,18 @@ Name); } - /// Create a call to the experimental.vector.extract intrinsic. + /// Create a call to the vector.extract intrinsic. 
CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx, const Twine &Name = "") { - return CreateIntrinsic(Intrinsic::experimental_vector_extract, + return CreateIntrinsic(Intrinsic::vector_extract, {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr, Name); } - /// Create a call to the experimental.vector.insert intrinsic. + /// Create a call to the vector.insert intrinsic. CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec, Value *Idx, const Twine &Name = "") { - return CreateIntrinsic(Intrinsic::experimental_vector_insert, + return CreateIntrinsic(Intrinsic::vector_insert, {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx}, nullptr, Name); } diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -1961,13 +1961,13 @@ def int_vscale : DefaultAttrsIntrinsic<[llvm_anyint_ty], [], [IntrNoMem]>; //===---------- Intrinsics to perform subvector insertion/extraction ------===// -def int_experimental_vector_insert : DefaultAttrsIntrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i64_ty], - [IntrNoMem, ImmArg>]>; +def int_vector_insert : DefaultAttrsIntrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i64_ty], + [IntrNoMem, ImmArg>]>; -def int_experimental_vector_extract : DefaultAttrsIntrinsic<[llvm_anyvector_ty], - [llvm_anyvector_ty, llvm_i64_ty], - [IntrNoMem, ImmArg>]>; +def int_vector_extract : DefaultAttrsIntrinsic<[llvm_anyvector_ty], + [llvm_anyvector_ty, llvm_i64_ty], + [IntrNoMem, ImmArg>]>; //===----------------- Pointer Authentication Intrinsics ------------------===// // diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -6021,14 +6021,14 @@ break; } - case Intrinsic::experimental_vector_extract: { + case Intrinsic::vector_extract: { Type *ReturnType = F->getReturnType(); // (extract_vector (insert_vector _, X, 0), 0) -> X unsigned IdxN = cast(Op1)->getZExtValue(); Value *X = nullptr; - if (match(Op0, m_Intrinsic( - m_Value(), m_Value(X), m_Zero())) && + if (match(Op0, m_Intrinsic(m_Value(), m_Value(X), + m_Zero())) && IdxN == 0 && X->getType() == ReturnType) return X; @@ -6169,7 +6169,7 @@ return nullptr; } - case Intrinsic::experimental_vector_insert: { + case Intrinsic::vector_insert: { Value *Vec = Call->getArgOperand(0); Value *SubVec = Call->getArgOperand(1); Value *Idx = Call->getArgOperand(2); @@ -6179,8 +6179,8 @@ // where: Y is X, or Y is undef unsigned IdxN = cast(Idx)->getZExtValue(); Value *X = nullptr; - if (match(SubVec, m_Intrinsic( - m_Value(X), m_Zero())) && + if (match(SubVec, + m_Intrinsic(m_Value(X), m_Zero())) && (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 && X->getType() == ReturnType) return X; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -7206,7 +7206,7 @@ setValue(&I, SetCC); return; } - case Intrinsic::experimental_vector_insert: { + case Intrinsic::vector_insert: { SDValue Vec = getValue(I.getOperand(0)); SDValue SubVec = getValue(I.getOperand(1)); SDValue Index = getValue(I.getOperand(2)); @@ -7223,7 +7223,7 @@ Index)); return; } - case Intrinsic::experimental_vector_extract: { + case Intrinsic::vector_extract: { 
SDValue Vec = getValue(I.getOperand(0)); SDValue Index = getValue(I.getOperand(1)); EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -748,6 +748,23 @@ break; } case 'e': { + if (Name.startswith("experimental.vector.extract.")) { + rename(F); + Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()}; + NewFn = Intrinsic::getDeclaration(F->getParent(), + Intrinsic::vector_extract, Tys); + return true; + } + + if (Name.startswith("experimental.vector.insert.")) { + rename(F); + auto Args = F->getFunctionType()->params(); + Type *Tys[] = {Args[0], Args[1]}; + NewFn = Intrinsic::getDeclaration(F->getParent(), + Intrinsic::vector_insert, Tys); + return true; + } + SmallVector Groups; static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[a-z][0-9]+"); if (R.match(Name, &Groups)) { diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -5512,7 +5512,7 @@ &Call); break; } - case Intrinsic::experimental_vector_insert: { + case Intrinsic::vector_insert: { Value *Vec = Call.getArgOperand(0); Value *SubVec = Call.getArgOperand(1); Value *Idx = Call.getArgOperand(2); @@ -5524,11 +5524,11 @@ ElementCount VecEC = VecTy->getElementCount(); ElementCount SubVecEC = SubVecTy->getElementCount(); Check(VecTy->getElementType() == SubVecTy->getElementType(), - "experimental_vector_insert parameters must have the same element " + "vector_insert parameters must have the same element " "type.", &Call); Check(IdxN % SubVecEC.getKnownMinValue() == 0, - "experimental_vector_insert index must be a constant multiple of " + "vector_insert index must be a constant multiple of " "the subvector's known minimum vector length."); // If this insertion is not the 'mixed' case where a fixed vector is @@ -5537,12 +5537,12 @@ if (VecEC.isScalable() == SubVecEC.isScalable()) { Check(IdxN < VecEC.getKnownMinValue() && IdxN + SubVecEC.getKnownMinValue() <= VecEC.getKnownMinValue(), - "subvector operand of experimental_vector_insert would overrun the " + "subvector operand of vector_insert would overrun the " "vector being inserted into."); } break; } - case Intrinsic::experimental_vector_extract: { + case Intrinsic::vector_extract: { Value *Vec = Call.getArgOperand(0); Value *Idx = Call.getArgOperand(1); unsigned IdxN = cast(Idx)->getZExtValue(); @@ -5554,11 +5554,11 @@ ElementCount ResultEC = ResultTy->getElementCount(); Check(ResultTy->getElementType() == VecTy->getElementType(), - "experimental_vector_extract result must have the same element " + "vector_extract result must have the same element " "type as the input vector.", &Call); Check(IdxN % ResultEC.getKnownMinValue() == 0, - "experimental_vector_extract index must be a constant multiple of " + "vector_extract index must be a constant multiple of " "the result type's known minimum vector length."); // If this extraction is not the 'mixed' case where a fixed vector is is @@ -5567,7 +5567,7 @@ if (VecEC.isScalable() == ResultEC.isScalable()) { Check(IdxN < VecEC.getKnownMinValue() && IdxN + ResultEC.getKnownMinValue() <= VecEC.getKnownMinValue(), - "experimental_vector_extract would overrun."); + "vector_extract would overrun."); } break; } diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ 
b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -652,8 +652,7 @@ return None; auto *VecIns = dyn_cast(DupQLane->getArgOperand(0)); - if (!VecIns || - VecIns->getIntrinsicID() != Intrinsic::experimental_vector_insert) + if (!VecIns || VecIns->getIntrinsicID() != Intrinsic::vector_insert) return None; // Where the vector insert is a fixed constant vector insert into undef at diff --git a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp --- a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp +++ b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp @@ -305,8 +305,7 @@ // ..where the value stored comes from a vector extract.. auto *IntrI = dyn_cast(Store->getOperand(0)); - if (!IntrI || - IntrI->getIntrinsicID() != Intrinsic::experimental_vector_extract) + if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::vector_extract) return false; // ..that is extracting from index 0.. @@ -365,8 +364,7 @@ // ..whose operand is a vector_insert.. auto *IntrI = dyn_cast(BitCast->getOperand(0)); - if (!IntrI || - IntrI->getIntrinsicID() != Intrinsic::experimental_vector_insert) + if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::vector_insert) return false; // ..that is inserting into index zero of an undef vector.. @@ -451,8 +449,8 @@ continue; switch (F.getIntrinsicID()) { - case Intrinsic::experimental_vector_extract: - case Intrinsic::experimental_vector_insert: + case Intrinsic::vector_extract: + case Intrinsic::vector_insert: case Intrinsic::aarch64_sve_ptrue: for (User *U : F.users()) Functions.insert(cast(U)->getFunction()); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -2357,7 +2357,7 @@ } break; } - case Intrinsic::experimental_vector_insert: { + case Intrinsic::vector_insert: { Value *Vec = II->getArgOperand(0); Value *SubVec = II->getArgOperand(1); Value *Idx = II->getArgOperand(2); @@ -2403,7 +2403,7 @@ } break; } - case Intrinsic::experimental_vector_extract: { + case Intrinsic::vector_extract: { Value *Vec = II->getArgOperand(0); Value *Idx = II->getArgOperand(1); diff --git a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll --- a/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll +++ b/llvm/test/Analysis/CostModel/AArch64/sve-intrinsics.ll @@ -3,22 +3,22 @@ define void @vector_insert_extract( %v0, %v1, <16 x i32> %v2) { ; CHECK-LABEL: 'vector_insert_extract' -; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( %v0, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %insert_fixed_into_scalable = call @llvm.experimental.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) +; CHECK-NEXT: Cost Model: Found an estimated cost of 81 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( %v0, i64 0) +; CHECK-NEXT: Cost Model: Found an estimated cost of 81 
for instruction: %insert_fixed_into_scalable = call @llvm.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call @llvm.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) ; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void ; - %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( %v0, i64 0) - %insert_fixed_into_scalable = call @llvm.experimental.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) - %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) - %insert_scalable_into_scalable = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) + %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( %v0, i64 0) + %insert_fixed_into_scalable = call @llvm.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) + %extract_scalable_from_scalable = call @llvm.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) + %insert_scalable_into_scalable = call @llvm.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) ret void } -declare <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(, i64) -declare @llvm.experimental.vector.insert.nxv4i32.v16i32(, <16 x i32>, i64) -declare @llvm.experimental.vector.extract.nxv4i32.nxv16i32(, i64) -declare @llvm.experimental.vector.insert.nxv16i32.nxv4i32(, , i64) +declare <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(, i64) +declare @llvm.vector.insert.nxv4i32.v16i32(, <16 x i32>, i64) +declare @llvm.vector.extract.nxv4i32.nxv16i32(, i64) +declare @llvm.vector.insert.nxv16i32.nxv4i32(, , i64) define void @reductions( %v0, %v1, %v2, %v3) { diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll --- a/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll +++ b/llvm/test/Analysis/CostModel/RISCV/rvv-shuffle.ll @@ -36,22 +36,22 @@ define void @vector_insert_extract( %v0, %v1, <16 x i32> %v2) { ; CHECK-LABEL: 'vector_insert_extract' -; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( %v0, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %insert_fixed_into_scalable = call @llvm.experimental.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) -; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %insert_scalable_into_scalable = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) +; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( %v0, i64 0) +; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %insert_fixed_into_scalable = call @llvm.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extract_scalable_from_scalable = call @llvm.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) +; CHECK-NEXT: Cost Model: Found an 
estimated cost of 1 for instruction: %insert_scalable_into_scalable = call @llvm.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) ; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void ; - %extract_fixed_from_scalable = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32( %v0, i64 0) - %insert_fixed_into_scalable = call @llvm.experimental.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) - %extract_scalable_from_scalable = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) - %insert_scalable_into_scalable = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) + %extract_fixed_from_scalable = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32( %v0, i64 0) + %insert_fixed_into_scalable = call @llvm.vector.insert.nxv4i32.v16i32( %v0, <16 x i32> %v2, i64 0) + %extract_scalable_from_scalable = call @llvm.vector.extract.nxv4i32.nxv16i32( %v1, i64 0) + %insert_scalable_into_scalable = call @llvm.vector.insert.nxv16i32.nxv4i32( %v1, %v0, i64 0) ret void } -declare <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(, i64) -declare @llvm.experimental.vector.insert.nxv4i32.v16i32(, <16 x i32>, i64) -declare @llvm.experimental.vector.extract.nxv4i32.nxv16i32(, i64) -declare @llvm.experimental.vector.insert.nxv16i32.nxv4i32(, , i64) +declare <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(, i64) +declare @llvm.vector.insert.nxv4i32.v16i32(, <16 x i32>, i64) +declare @llvm.vector.extract.nxv4i32.nxv16i32(, i64) +declare @llvm.vector.insert.nxv16i32.nxv4i32(, , i64) define void @vector_reverse() { ; CHECK-LABEL: 'vector_reverse' diff --git a/llvm/test/Bitcode/upgrade-vector-insert-extract-intrinsics.ll b/llvm/test/Bitcode/upgrade-vector-insert-extract-intrinsics.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Bitcode/upgrade-vector-insert-extract-intrinsics.ll @@ -0,0 +1,22 @@ +; RUN: opt -S < %s | FileCheck %s +; RUN: llvm-dis < %s.bc | FileCheck %s + +define @insert( %a, <4 x i8> %b) { +; CHECK-LABEL: @insert +; CHECK: %res = call @llvm.vector.insert.nxv16i8.v4i8( %a, <4 x i8> %b, i64 0) + %res = call @llvm.experimental.vector.insert.nxv16i8.v4i8( %a, <4 x i8> %b, i64 0) + ret %res +} + +define <4 x i8> @extract( %a) { +; CHECK-LABEL: @extract +; CHECK: %res = call <4 x i8> @llvm.vector.extract.v4i8.nxv16i8( %a, i64 0) + %res = call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv16i8( %a, i64 0) + ret <4 x i8> %res +} + +declare @llvm.experimental.vector.insert.nxv16i8.v4i8(, <4 x i8>, i64 immarg) +; CHECK: declare @llvm.vector.insert.nxv16i8.v4i8(, <4 x i8>, i64 immarg) + +declare <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv16i8(, i64 immarg) +; CHECK: declare <4 x i8> @llvm.vector.extract.v4i8.nxv16i8(, i64 immarg) diff --git a/llvm/test/Bitcode/upgrade-vector-insert-extract-intrinsics.ll.bc b/llvm/test/Bitcode/upgrade-vector-insert-extract-intrinsics.ll.bc new file mode 100644 index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 GIT binary patch literal 0 Hc$@ @llvm.experimental.vector.extract.v16f32.nxv4f32(, i64) -declare @llvm.experimental.vector.insert.nxv2f64.v8f64(, <8 x double>, i64) +declare <16 x float> @llvm.vector.extract.v16f32.nxv4f32(, i64) +declare @llvm.vector.insert.nxv2f64.v8f64(, <8 x double>, i64) define @reproducer_one( %vec_a) #0 { - %a = call <16 x float> @llvm.experimental.vector.extract.v16f32.nxv4f32( %vec_a, i64 0) + %a = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32( %vec_a, i64 0) %b = bitcast <16 x float> %a to <8 x 
double> - %retval = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> %b, i64 0) + %retval = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> %b, i64 0) ret %retval } define @reproducer_two(<4 x double> %a, <4 x double> %b) #0 { %concat = shufflevector <4 x double> %a, <4 x double> %b, <8 x i32> - %retval = call @llvm.experimental.vector.insert.nxv2f64.v8f64( undef, <8 x double> %concat, i64 0) + %retval = call @llvm.vector.insert.nxv2f64.v8f64( undef, <8 x double> %concat, i64 0) ret %retval } diff --git a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll --- a/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll +++ b/llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll @@ -17,7 +17,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load , * %b - %ins = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( %vec, %subvec, i64 0) + %ins = call @llvm.vector.insert.nxv8i8.nxv4i8( %vec, %subvec, i64 0) ret %ins } @@ -33,7 +33,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load , * %b - %ins = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( %vec, %subvec, i64 4) + %ins = call @llvm.vector.insert.nxv8i8.nxv4i8( %vec, %subvec, i64 4) ret %ins } @@ -49,7 +49,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load , * %b - %ins = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( %vec, %subvec, i64 0) + %ins = call @llvm.vector.insert.nxv4i16.nxv2i16( %vec, %subvec, i64 0) ret %ins } @@ -65,7 +65,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load , * %b - %ins = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( %vec, %subvec, i64 2) + %ins = call @llvm.vector.insert.nxv4i16.nxv2i16( %vec, %subvec, i64 2) ret %ins } @@ -83,7 +83,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load <8 x i8>, <8 x i8>* %b - %ins = call @llvm.experimental.vector.insert.nxv8i8.v8i8( %vec, <8 x i8> %subvec, i64 0) + %ins = call @llvm.vector.insert.nxv8i8.v8i8( %vec, <8 x i8> %subvec, i64 0) ret %ins } @@ -111,7 +111,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load <8 x i8>, <8 x i8>* %b - %ins = call @llvm.experimental.vector.insert.nxv8i8.v8i8( %vec, <8 x i8> %subvec, i64 8) + %ins = call @llvm.vector.insert.nxv8i8.v8i8( %vec, <8 x i8> %subvec, i64 8) ret %ins } @@ -127,7 +127,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load <4 x i16>, <4 x i16>* %b - %ins = call @llvm.experimental.vector.insert.nxv4i16.v4i16( %vec, <4 x i16> %subvec, i64 0) + %ins = call @llvm.vector.insert.nxv4i16.v4i16( %vec, <4 x i16> %subvec, i64 0) ret %ins } @@ -155,7 +155,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load <4 x i16>, <4 x i16>* %b - %ins = call @llvm.experimental.vector.insert.nxv4i16.v4i16( %vec, <4 x i16> %subvec, i64 4) + %ins = call @llvm.vector.insert.nxv4i16.v4i16( %vec, <4 x i16> %subvec, i64 4) ret %ins } @@ -171,7 +171,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load <2 x i32>, <2 x i32>* %b - %ins = call @llvm.experimental.vector.insert.nxv2i32.v2i32( %vec, <2 x i32> %subvec, i64 0) + %ins = call @llvm.vector.insert.nxv2i32.v2i32( %vec, <2 x i32> %subvec, i64 0) ret %ins } @@ -199,7 +199,7 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load <2 x i32>, <2 x i32>* %b - %ins = call @llvm.experimental.vector.insert.nxv2i32.v2i32( %vec, <2 x i32> %subvec, i64 2) + %ins = call @llvm.vector.insert.nxv2i32.v2i32( %vec, <2 x i32> %subvec, i64 2) ret %ins } @@ -228,18 +228,18 @@ ; CHECK-NEXT: ret %vec = load , * %a %subvec = load <8 x 
i32>, <8 x i32>* %b - %ins = call @llvm.experimental.vector.insert.nxv2i32.v8i32( %vec, <8 x i32> %subvec, i64 8) + %ins = call @llvm.vector.insert.nxv2i32.v8i32( %vec, <8 x i32> %subvec, i64 8) ret %ins } -declare @llvm.experimental.vector.insert.nxv8i8.nxv4i8(, , i64) -declare @llvm.experimental.vector.insert.nxv4i16.nxv2i16(, , i64) +declare @llvm.vector.insert.nxv8i8.nxv4i8(, , i64) +declare @llvm.vector.insert.nxv4i16.nxv2i16(, , i64) -declare @llvm.experimental.vector.insert.nxv8i8.v8i8(, <8 x i8>, i64) -declare @llvm.experimental.vector.insert.nxv4i16.v4i16(, <4 x i16>, i64) -declare @llvm.experimental.vector.insert.nxv2i32.v2i32(, <2 x i32>, i64) +declare @llvm.vector.insert.nxv8i8.v8i8(, <8 x i8>, i64) +declare @llvm.vector.insert.nxv4i16.v4i16(, <4 x i16>, i64) +declare @llvm.vector.insert.nxv2i32.v2i32(, <2 x i32>, i64) -declare @llvm.experimental.vector.insert.nxv2i32.v8i32(, <8 x i32>, i64) +declare @llvm.vector.insert.nxv2i32.v8i32(, <8 x i32>, i64) attributes #0 = { nounwind "target-features"="+sve" } attributes #1 = { nounwind "target-features"="+sve" vscale_range(4,4) } diff --git a/llvm/test/CodeGen/AArch64/split-vector-insert.ll b/llvm/test/CodeGen/AArch64/split-vector-insert.ll --- a/llvm/test/CodeGen/AArch64/split-vector-insert.ll +++ b/llvm/test/CodeGen/AArch64/split-vector-insert.ll @@ -5,8 +5,8 @@ target triple = "aarch64-unknown-linux-gnu" attributes #0 = {"target-features"="+sve" uwtable} -declare @llvm.experimental.vector.insert.nxv2i64.v8i64(, <8 x i64>, i64) -declare @llvm.experimental.vector.insert.nxv2f64.v8f64(, <8 x double>, i64) +declare @llvm.vector.insert.nxv2i64.v8i64(, <8 x i64>, i64) +declare @llvm.vector.insert.nxv2f64.v8f64(, <8 x double>, i64) define @test_nxv2i64_v8i64( %a, <8 x i64> %b) #0 { ; CHECK-LEGALIZATION: Legally typed node: [[T1:t[0-9]+]]: nxv2i64 = insert_subvector {{t[0-9]+}}, {{t[0-9]+}}, Constant:i64<0> @@ -61,7 +61,7 @@ - %r = call @llvm.experimental.vector.insert.nxv2i64.v8i64( %a, <8 x i64> %b, i64 0) + %r = call @llvm.vector.insert.nxv2i64.v8i64( %a, <8 x i64> %b, i64 0) ret %r } @@ -118,6 +118,6 @@ - %r = call @llvm.experimental.vector.insert.nxv2f64.v8f64( %a, <8 x double> %b, i64 0) + %r = call @llvm.vector.insert.nxv2f64.v8f64( %a, <8 x double> %b, i64 0) ret %r } diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll --- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll @@ -4,8 +4,8 @@ ; CHECK-ERROR: ERROR: Extracting a fixed-length vector from an illegal scalable vector is not yet supported define <4 x i32> @extract_v4i32_nxv16i32_12( %arg) { - %ext = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv16i32( %arg, i64 12) + %ext = call <4 x i32> @llvm.vector.extract.v4i32.nxv16i32( %arg, i64 12) ret <4 x i32> %ext } -declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv16i32(, i64) +declare <4 x i32> @llvm.vector.extract.v4i32.nxv16i32(, i64) diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll --- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll @@ -7,7 +7,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( %vec, i64 0) + %retval = call <2 x i64> 
@llvm.vector.extract.v2i64.nxv2i64( %vec, i64 0) ret <2 x i64> %retval } @@ -30,7 +30,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( %vec, i64 2) + %retval = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( %vec, i64 2) ret <2 x i64> %retval } @@ -40,7 +40,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( %vec, i64 0) + %retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( %vec, i64 0) ret <4 x i32> %retval } @@ -63,7 +63,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( %vec, i64 4) + %retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( %vec, i64 4) ret <4 x i32> %retval } @@ -74,7 +74,7 @@ ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv2i32( %vec, i64 0) + %retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32( %vec, i64 0) ret <4 x i32> %retval } @@ -100,7 +100,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv2i32( %vec, i64 4) + %retval = call <4 x i32> @llvm.vector.extract.v4i32.nxv2i32( %vec, i64 4) ret <4 x i32> %retval } @@ -110,7 +110,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( %vec, i64 0) + %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( %vec, i64 0) ret <8 x i16> %retval } @@ -133,7 +133,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16( %vec, i64 8) + %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv8i16( %vec, i64 8) ret <8 x i16> %retval } @@ -144,7 +144,7 @@ ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv4i16( %vec, i64 0) + %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv4i16( %vec, i64 0) ret <8 x i16> %retval } @@ -170,7 +170,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv4i16( %vec, i64 8) + %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv4i16( %vec, i64 8) ret <8 x i16> %retval } @@ -182,7 +182,7 @@ ; CHECK-NEXT: uzp1 z0.h, z0.h, z0.h ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv2i16( %vec, i64 0) + %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv2i16( %vec, i64 0) ret <8 x i16> %retval } @@ -209,7 +209,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv2i16( %vec, i64 8) + %retval = call <8 x i16> @llvm.vector.extract.v8i16.nxv2i16( %vec, i64 8) ret <8 x i16> %retval } @@ -219,7 +219,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: 
// kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( %vec, i64 0) + %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( %vec, i64 0) ret <16 x i8> %retval } @@ -241,7 +241,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8( %vec, i64 16) + %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv16i8( %vec, i64 16) ret <16 x i8> %retval } @@ -252,7 +252,7 @@ ; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv8i8( %vec, i64 0) + %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8( %vec, i64 0) ret <16 x i8> %retval } @@ -278,7 +278,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv8i8( %vec, i64 16) + %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8( %vec, i64 16) ret <16 x i8> %retval } @@ -290,7 +290,7 @@ ; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv4i8( %vec, i64 0) + %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv4i8( %vec, i64 0) ret <16 x i8> %retval } @@ -317,7 +317,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv4i8( %vec, i64 16) + %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv4i8( %vec, i64 16) ret <16 x i8> %retval } @@ -330,7 +330,7 @@ ; CHECK-NEXT: uzp1 z0.b, z0.b, z0.b ; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0 ; CHECK-NEXT: ret - %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv2i8( %vec, i64 0) + %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv2i8( %vec, i64 0) ret <16 x i8> %retval } @@ -357,7 +357,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv2i8( %vec, i64 16) + %retval = call <16 x i8> @llvm.vector.extract.v16i8.nxv2i8( %vec, i64 16) ret <16 x i8> %retval } @@ -374,7 +374,7 @@ ; CHECK-NEXT: mov v0.s[1], w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %mask = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1( %inmask, i64 0) + %mask = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1( %inmask, i64 0) ret <2 x i1> %mask } @@ -391,7 +391,7 @@ ; CHECK-NEXT: mov v0.h[3], w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %mask = call <4 x i1> @llvm.experimental.vector.extract.v4i1.nxv4i1( %inmask, i64 0) + %mask = call <4 x i1> @llvm.vector.extract.v4i1.nxv4i1( %inmask, i64 0) ret <4 x i1> %mask } @@ -416,7 +416,7 @@ ; CHECK-NEXT: mov v0.b[7], w8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %mask = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv8i1( %inmask, i64 0) + %mask = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1( %inmask, i64 0) ret <8 x i1> %mask } @@ -456,7 +456,7 @@ ; CHECK-NEXT: mov v0.b[14], w9 ; CHECK-NEXT: mov v0.b[15], w8 ; CHECK-NEXT: ret - %mask = call <16 x i1> @llvm.experimental.vector.extract.v16i1.nxv16i1( %inmask, i64 0) + 
%mask = call <16 x i1> @llvm.vector.extract.v16i1.nxv16i1( %inmask, i64 0) ret <16 x i1> %mask } @@ -481,7 +481,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64( %vec, i64 2) + %retval = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64( %vec, i64 2) ret <2 x i64> %retval } @@ -504,7 +504,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( %vec, i64 4) + %retval = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( %vec, i64 4) ret <4 x i64> %retval } @@ -520,7 +520,7 @@ %ptr = getelementptr inbounds i32, i32* %addr, i64 %idx %bc = bitcast i32* %ptr to * %ld = load , * %bc, align 16 - %out = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( %ld, i64 0) + %out = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( %ld, i64 0) ret <4 x i32> %out } @@ -536,7 +536,7 @@ ; CHECK-NEXT: ret %ins = insertelement poison, float %f, i32 0 %splat = shufflevector %ins, poison, zeroinitializer - %ext = call <2 x float> @llvm.experimental.vector.extract.v2f32.nxv4f32( %splat, i64 0) + %ext = call <2 x float> @llvm.vector.extract.v2f32.nxv4f32( %splat, i64 0) ret <2 x float> %ext } @@ -547,7 +547,7 @@ ; CHECK-NEXT: ret %ins = insertelement poison, float 1.0, i32 0 %splat = shufflevector %ins, poison, zeroinitializer - %ext = call <2 x float> @llvm.experimental.vector.extract.v2f32.nxv4f32( %splat, i64 0) + %ext = call <2 x float> @llvm.vector.extract.v2f32.nxv4f32( %splat, i64 0) ret <2 x float> %ext } @@ -558,32 +558,32 @@ ; CHECK-NEXT: ret %ins = insertelement poison, i32 1, i32 0 %splat = shufflevector %ins, poison, zeroinitializer - %ext = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv8i32( %splat, i64 0) + %ext = call <4 x i32> @llvm.vector.extract.v4i32.nxv8i32( %splat, i64 0) ret <4 x i32> %ext } attributes #0 = { vscale_range(2,2) } attributes #1 = { vscale_range(8,8) } -declare <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(, i64) +declare <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(, i64) -declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(, i64) -declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv2i32(, i64) +declare <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(, i64) +declare <4 x i32> @llvm.vector.extract.v4i32.nxv2i32(, i64) -declare <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(, i64) -declare <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv4i16(, i64) -declare <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv2i16(, i64) +declare <8 x i16> @llvm.vector.extract.v8i16.nxv8i16(, i64) +declare <8 x i16> @llvm.vector.extract.v8i16.nxv4i16(, i64) +declare <8 x i16> @llvm.vector.extract.v8i16.nxv2i16(, i64) -declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(, i64) -declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv8i8(, i64) -declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv4i8(, i64) -declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv2i8(, i64) +declare <16 x i8> @llvm.vector.extract.v16i8.nxv16i8(, i64) +declare <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(, i64) +declare <16 x i8> @llvm.vector.extract.v16i8.nxv4i8(, i64) +declare <16 x i8> @llvm.vector.extract.v16i8.nxv2i8(, i64) -declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1(, i64) -declare <4 x i1> 
@llvm.experimental.vector.extract.v4i1.nxv4i1(, i64) -declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv8i1(, i64) -declare <16 x i1> @llvm.experimental.vector.extract.v16i1.nxv16i1(, i64) +declare <2 x i1> @llvm.vector.extract.v2i1.nxv2i1(, i64) +declare <4 x i1> @llvm.vector.extract.v4i1.nxv4i1(, i64) +declare <8 x i1> @llvm.vector.extract.v8i1.nxv8i1(, i64) +declare <16 x i1> @llvm.vector.extract.v16i1.nxv16i1(, i64) -declare <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(, i64) -declare <2 x float> @llvm.experimental.vector.extract.v2f32.nxv4f32(, i64) -declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv8i32(, i64) +declare <4 x i64> @llvm.vector.extract.v4i64.nxv2i64(, i64) +declare <2 x float> @llvm.vector.extract.v2f32.nxv4f32(, i64) +declare <4 x i32> @llvm.vector.extract.v4i32.nxv8i32(, i64) diff --git a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll --- a/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-scalable-vector.ll @@ -7,7 +7,7 @@ ; CHECK-LABEL: extract_nxv1i32_nxv4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( %vec, i64 0) + %retval = call @llvm.vector.extract.nxv1i32.nxv4i32( %vec, i64 0) ret %retval } @@ -15,12 +15,12 @@ ; CHECK-LABEL: extract_nxv1i16_nxv6i16: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.extract.nxv1i16.nxv6i16( %vec, i64 0) + %retval = call @llvm.vector.extract.nxv1i16.nxv6i16( %vec, i64 0) ret %retval } -declare @llvm.experimental.vector.extract.nxv1i32.nxv4i32(, i64) -declare @llvm.experimental.vector.extract.nxv1i16.nxv6i16(, i64) +declare @llvm.vector.extract.nxv1i32.nxv4i32(, i64) +declare @llvm.vector.extract.nxv1i16.nxv6i16(, i64) ; ; Extract half i1 vector that needs promotion from legal type. @@ -30,7 +30,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i1.nxv16i1( %in, i64 0) + %res = call @llvm.vector.extract.nxv8i1.nxv16i1( %in, i64 0) ret %res } @@ -39,11 +39,11 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i1.nxv16i1( %in, i64 8) + %res = call @llvm.vector.extract.nxv8i1.nxv16i1( %in, i64 8) ret %res } -declare @llvm.experimental.vector.extract.nxv8i1.nxv16i1(, i64) +declare @llvm.vector.extract.nxv8i1.nxv16i1(, i64) ; ; Extract i1 vector that needs widening from one that needs widening. @@ -52,7 +52,7 @@ ; CHECK-LABEL: extract_nxv14i1_nxv28i1_0: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv14i1.nxv28i1( %in, i64 0) + %res = call @llvm.vector.extract.nxv14i1.nxv28i1( %in, i64 0) ret %res } @@ -95,11 +95,11 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: .cfi_restore w29 ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv14i1.nxv28i1( %in, i64 14) + %res = call @llvm.vector.extract.nxv14i1.nxv28i1( %in, i64 14) ret %res } -declare @llvm.experimental.vector.extract.nxv14i1.nxv28i1(, i64) +declare @llvm.vector.extract.nxv14i1.nxv28i1(, i64) ; ; Extract half i1 vector that needs promotion from one that needs splitting. 
@@ -109,7 +109,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i1.nxv32i1( %in, i64 0) + %res = call @llvm.vector.extract.nxv8i1.nxv32i1( %in, i64 0) ret %res } @@ -118,7 +118,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i1.nxv32i1( %in, i64 8) + %res = call @llvm.vector.extract.nxv8i1.nxv32i1( %in, i64 8) ret %res } @@ -127,7 +127,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: punpklo p0.h, p1.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i1.nxv32i1( %in, i64 16) + %res = call @llvm.vector.extract.nxv8i1.nxv32i1( %in, i64 16) ret %res } @@ -136,11 +136,11 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: punpkhi p0.h, p1.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i1.nxv32i1( %in, i64 24) + %res = call @llvm.vector.extract.nxv8i1.nxv32i1( %in, i64 24) ret %res } -declare @llvm.experimental.vector.extract.nxv8i1.nxv32i1(, i64) +declare @llvm.vector.extract.nxv8i1.nxv32i1(, i64) ; ; Extract 1/4th i1 vector that needs promotion from legal type. @@ -151,7 +151,7 @@ ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i1.nxv16i1( %in, i64 0) + %res = call @llvm.vector.extract.nxv4i1.nxv16i1( %in, i64 0) ret %res } @@ -161,7 +161,7 @@ ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i1.nxv16i1( %in, i64 4) + %res = call @llvm.vector.extract.nxv4i1.nxv16i1( %in, i64 4) ret %res } @@ -171,7 +171,7 @@ ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i1.nxv16i1( %in, i64 8) + %res = call @llvm.vector.extract.nxv4i1.nxv16i1( %in, i64 8) ret %res } @@ -181,11 +181,11 @@ ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i1.nxv16i1( %in, i64 12) + %res = call @llvm.vector.extract.nxv4i1.nxv16i1( %in, i64 12) ret %res } -declare @llvm.experimental.vector.extract.nxv4i1.nxv16i1(, i64) +declare @llvm.vector.extract.nxv4i1.nxv16i1(, i64) ; ; Extract 1/8th i1 vector that needs promotion from legal type. 
@@ -197,7 +197,7 @@ ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %in, i64 0) + %res = call @llvm.vector.extract.nxv2i1.nxv16i1( %in, i64 0) ret %res } @@ -208,7 +208,7 @@ ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %in, i64 2) + %res = call @llvm.vector.extract.nxv2i1.nxv16i1( %in, i64 2) ret %res } @@ -219,7 +219,7 @@ ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %in, i64 4) + %res = call @llvm.vector.extract.nxv2i1.nxv16i1( %in, i64 4) ret %res } @@ -230,7 +230,7 @@ ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %in, i64 6) + %res = call @llvm.vector.extract.nxv2i1.nxv16i1( %in, i64 6) ret %res } @@ -241,7 +241,7 @@ ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %in, i64 8) + %res = call @llvm.vector.extract.nxv2i1.nxv16i1( %in, i64 8) ret %res } @@ -252,7 +252,7 @@ ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %in, i64 10) + %res = call @llvm.vector.extract.nxv2i1.nxv16i1( %in, i64 10) ret %res } @@ -263,7 +263,7 @@ ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %in, i64 12) + %res = call @llvm.vector.extract.nxv2i1.nxv16i1( %in, i64 12) ret %res } @@ -274,11 +274,11 @@ ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %in, i64 14) + %res = call @llvm.vector.extract.nxv2i1.nxv16i1( %in, i64 14) ret %res } -declare @llvm.experimental.vector.extract.nxv2i1.nxv16i1(, i64) +declare @llvm.vector.extract.nxv2i1.nxv16i1(, i64) ; ; Extract i1 vector that needs promotion from one that needs widening. @@ -289,7 +289,7 @@ ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i1.nxv12i1( %in, i64 0) + %res = call @llvm.vector.extract.nxv4i1.nxv12i1( %in, i64 0) ret %res } @@ -299,7 +299,7 @@ ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i1.nxv12i1( %in, i64 4) + %res = call @llvm.vector.extract.nxv4i1.nxv12i1( %in, i64 4) ret %res } @@ -309,11 +309,11 @@ ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i1.nxv12i1( %in, i64 8) + %res = call @llvm.vector.extract.nxv4i1.nxv12i1( %in, i64 8) ret %res } -declare @llvm.experimental.vector.extract.nxv4i1.nxv12i1(, i64) +declare @llvm.vector.extract.nxv4i1.nxv12i1(, i64) ; ; Extract 1/8th i8 vector that needs promotion from legal type. 
@@ -325,7 +325,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( %in, i64 0) + %res = call @llvm.vector.extract.nxv2i8.nxv16i8( %in, i64 0) ret %res } @@ -336,7 +336,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( %in, i64 2) + %res = call @llvm.vector.extract.nxv2i8.nxv16i8( %in, i64 2) ret %res } @@ -347,7 +347,7 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( %in, i64 4) + %res = call @llvm.vector.extract.nxv2i8.nxv16i8( %in, i64 4) ret %res } @@ -358,7 +358,7 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( %in, i64 6) + %res = call @llvm.vector.extract.nxv2i8.nxv16i8( %in, i64 6) ret %res } @@ -369,7 +369,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( %in, i64 8) + %res = call @llvm.vector.extract.nxv2i8.nxv16i8( %in, i64 8) ret %res } @@ -380,7 +380,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( %in, i64 10) + %res = call @llvm.vector.extract.nxv2i8.nxv16i8( %in, i64 10) ret %res } @@ -391,7 +391,7 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( %in, i64 12) + %res = call @llvm.vector.extract.nxv2i8.nxv16i8( %in, i64 12) ret %res } @@ -402,11 +402,11 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( %in, i64 14) + %res = call @llvm.vector.extract.nxv2i8.nxv16i8( %in, i64 14) ret %res } -declare @llvm.experimental.vector.extract.nxv2i8.nxv16i8(, i64) +declare @llvm.vector.extract.nxv2i8.nxv16i8(, i64) ; ; Extract i8 vector that needs promotion from one that needs widening. @@ -417,7 +417,7 @@ ; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i8.nxv12i8( %in, i64 0) + %res = call @llvm.vector.extract.nxv4i8.nxv12i8( %in, i64 0) ret %res } @@ -427,7 +427,7 @@ ; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i8.nxv12i8( %in, i64 4) + %res = call @llvm.vector.extract.nxv4i8.nxv12i8( %in, i64 4) ret %res } @@ -437,11 +437,11 @@ ; CHECK-NEXT: uunpkhi z0.h, z0.b ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i8.nxv12i8( %in, i64 8) + %res = call @llvm.vector.extract.nxv4i8.nxv12i8( %in, i64 8) ret %res } -declare @llvm.experimental.vector.extract.nxv4i8.nxv12i8(, i64) +declare @llvm.vector.extract.nxv4i8.nxv12i8(, i64) ; ; Extract i8 vector that needs both widening + promotion from one that needs widening. 
@@ -452,7 +452,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv6i8.nxv12i8( %in, i64 0) + %res = call @llvm.vector.extract.nxv6i8.nxv12i8( %in, i64 0) ret %res } @@ -470,11 +470,11 @@ ; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s ; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv6i8.nxv12i8( %in, i64 6) + %res = call @llvm.vector.extract.nxv6i8.nxv12i8( %in, i64 6) ret %res } -declare @llvm.experimental.vector.extract.nxv6i8.nxv12i8(, i64) +declare @llvm.vector.extract.nxv6i8.nxv12i8(, i64) ; ; Extract half i8 vector that needs promotion from one that needs splitting. @@ -484,7 +484,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( %in, i64 0) + %res = call @llvm.vector.extract.nxv8i8.nxv32i8( %in, i64 0) ret %res } @@ -493,7 +493,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpkhi z0.h, z0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( %in, i64 8) + %res = call @llvm.vector.extract.nxv8i8.nxv32i8( %in, i64 8) ret %res } @@ -502,7 +502,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.h, z1.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( %in, i64 16) + %res = call @llvm.vector.extract.nxv8i8.nxv32i8( %in, i64 16) ret %res } @@ -511,11 +511,11 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpkhi z0.h, z1.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( %in, i64 24) + %res = call @llvm.vector.extract.nxv8i8.nxv32i8( %in, i64 24) ret %res } -declare @llvm.experimental.vector.extract.nxv8i8.nxv32i8(, i64) +declare @llvm.vector.extract.nxv8i8.nxv32i8(, i64) ; ; Extract half i8 vector that needs promotion from legal type. @@ -525,7 +525,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( %in, i64 0) + %res = call @llvm.vector.extract.nxv8i8.nxv16i8( %in, i64 0) ret %res } @@ -534,11 +534,11 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpkhi z0.h, z0.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( %in, i64 8) + %res = call @llvm.vector.extract.nxv8i8.nxv16i8( %in, i64 8) ret %res } -declare @llvm.experimental.vector.extract.nxv8i8.nxv16i8(, i64) +declare @llvm.vector.extract.nxv8i8.nxv16i8(, i64) ; ; Extract i8 vector that needs widening from one that needs widening. @@ -547,7 +547,7 @@ ; CHECK-LABEL: extract_nxv14i8_nxv28i8_0: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv14i8.nxv28i8( %in, i64 0) + %res = call @llvm.vector.extract.nxv14i8.nxv28i8( %in, i64 0) ret %res } @@ -621,11 +621,11 @@ ; CHECK-NEXT: uzp1 z1.h, z1.h, z2.h ; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv14i8.nxv28i8( %in, i64 14) + %res = call @llvm.vector.extract.nxv14i8.nxv28i8( %in, i64 14) ret %res } -declare @llvm.experimental.vector.extract.nxv14i8.nxv28i8(, i64) +declare @llvm.vector.extract.nxv14i8.nxv28i8(, i64) ; ; Extract 1/4th i8 vector that needs promotion from legal type. 
@@ -636,7 +636,7 @@ ; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( %in, i64 0) + %res = call @llvm.vector.extract.nxv4i8.nxv16i8( %in, i64 0) ret %res } @@ -646,7 +646,7 @@ ; CHECK-NEXT: uunpklo z0.h, z0.b ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( %in, i64 4) + %res = call @llvm.vector.extract.nxv4i8.nxv16i8( %in, i64 4) ret %res } @@ -656,7 +656,7 @@ ; CHECK-NEXT: uunpkhi z0.h, z0.b ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( %in, i64 8) + %res = call @llvm.vector.extract.nxv4i8.nxv16i8( %in, i64 8) ret %res } @@ -666,11 +666,11 @@ ; CHECK-NEXT: uunpkhi z0.h, z0.b ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( %in, i64 12) + %res = call @llvm.vector.extract.nxv4i8.nxv16i8( %in, i64 12) ret %res } -declare @llvm.experimental.vector.extract.nxv4i8.nxv16i8(, i64) +declare @llvm.vector.extract.nxv4i8.nxv16i8(, i64) ; ; Extract f16 vector that needs promotion from one that needs widening. @@ -681,7 +681,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2f16.nxv6f16( %in, i64 0) + %res = call @llvm.vector.extract.nxv2f16.nxv6f16( %in, i64 0) ret %res } @@ -691,7 +691,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2f16.nxv6f16( %in, i64 2) + %res = call @llvm.vector.extract.nxv2f16.nxv6f16( %in, i64 2) ret %res } @@ -701,11 +701,11 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2f16.nxv6f16( %in, i64 4) + %res = call @llvm.vector.extract.nxv2f16.nxv6f16( %in, i64 4) ret %res } -declare @llvm.experimental.vector.extract.nxv2f16.nxv6f16(, i64) +declare @llvm.vector.extract.nxv2f16.nxv6f16(, i64) ; ; Extract half f16 vector that needs promotion from legal type. @@ -715,7 +715,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( %in, i64 0) + %res = call @llvm.vector.extract.nxv4f16.nxv8f16( %in, i64 0) ret %res } @@ -724,11 +724,11 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4f16.nxv8f16( %in, i64 4) + %res = call @llvm.vector.extract.nxv4f16.nxv8f16( %in, i64 4) ret %res } -declare @llvm.experimental.vector.extract.nxv4f16.nxv8f16(, i64) +declare @llvm.vector.extract.nxv4f16.nxv8f16(, i64) ; ; Extract f16 vector that needs widening from one that needs widening. 
@@ -737,7 +737,7 @@ ; CHECK-LABEL: extract_nxv6f16_nxv12f16_0: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv6f16.nxv12f16( %in, i64 0) + %res = call @llvm.vector.extract.nxv6f16.nxv12f16( %in, i64 0) ret %res } @@ -753,11 +753,11 @@ ; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s ; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv6f16.nxv12f16( %in, i64 6) + %res = call @llvm.vector.extract.nxv6f16.nxv12f16( %in, i64 6) ret %res } -declare @llvm.experimental.vector.extract.nxv6f16.nxv12f16(, i64) +declare @llvm.vector.extract.nxv6f16.nxv12f16(, i64) ; ; Extract half f16 vector that needs promotion from one that needs splitting. @@ -767,7 +767,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( %in, i64 0) + %res = call @llvm.vector.extract.nxv4f16.nxv16f16( %in, i64 0) ret %res } @@ -776,7 +776,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( %in, i64 4) + %res = call @llvm.vector.extract.nxv4f16.nxv16f16( %in, i64 4) ret %res } @@ -785,7 +785,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.s, z1.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( %in, i64 8) + %res = call @llvm.vector.extract.nxv4f16.nxv16f16( %in, i64 8) ret %res } @@ -794,11 +794,11 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpkhi z0.s, z1.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4f16.nxv16f16( %in, i64 12) + %res = call @llvm.vector.extract.nxv4f16.nxv16f16( %in, i64 12) ret %res } -declare @llvm.experimental.vector.extract.nxv4f16.nxv16f16(, i64) +declare @llvm.vector.extract.nxv4f16.nxv16f16(, i64) ; ; Extract 1/4th f16 vector that needs promotion from legal type. @@ -809,7 +809,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2f16.nxv8f16( %in, i64 0) + %res = call @llvm.vector.extract.nxv2f16.nxv8f16( %in, i64 0) ret %res } @@ -819,7 +819,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2f16.nxv8f16( %in, i64 2) + %res = call @llvm.vector.extract.nxv2f16.nxv8f16( %in, i64 2) ret %res } @@ -829,7 +829,7 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2f16.nxv8f16( %in, i64 4) + %res = call @llvm.vector.extract.nxv2f16.nxv8f16( %in, i64 4) ret %res } @@ -839,11 +839,11 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2f16.nxv8f16( %in, i64 6) + %res = call @llvm.vector.extract.nxv2f16.nxv8f16( %in, i64 6) ret %res } -declare @llvm.experimental.vector.extract.nxv2f16.nxv8f16(, i64) +declare @llvm.vector.extract.nxv2f16.nxv8f16(, i64) ; ; Extract half bf16 vector that needs promotion from legal type. 
@@ -853,7 +853,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4bf16.nxv8bf16( %in, i64 0) + %res = call @llvm.vector.extract.nxv4bf16.nxv8bf16( %in, i64 0) ret %res } @@ -862,11 +862,11 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4bf16.nxv8bf16( %in, i64 4) + %res = call @llvm.vector.extract.nxv4bf16.nxv8bf16( %in, i64 4) ret %res } -declare @llvm.experimental.vector.extract.nxv4bf16.nxv8bf16(, i64) +declare @llvm.vector.extract.nxv4bf16.nxv8bf16(, i64) ; ; Extract bf16 vector that needs widening from one that needs widening. @@ -875,7 +875,7 @@ ; CHECK-LABEL: extract_nxv6bf16_nxv12bf16_0: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv6bf16.nxv12bf16( %in, i64 0) + %res = call @llvm.vector.extract.nxv6bf16.nxv12bf16( %in, i64 0) ret %res } @@ -891,11 +891,11 @@ ; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s ; CHECK-NEXT: uzp1 z0.h, z0.h, z2.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv6bf16.nxv12bf16( %in, i64 6) + %res = call @llvm.vector.extract.nxv6bf16.nxv12bf16( %in, i64 6) ret %res } -declare @llvm.experimental.vector.extract.nxv6bf16.nxv12bf16(, i64) +declare @llvm.vector.extract.nxv6bf16.nxv12bf16(, i64) ; ; Extract bf16 vector that needs promotion from one that needs widening. @@ -906,7 +906,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2bf16.nxv6bf16( %in, i64 0) + %res = call @llvm.vector.extract.nxv2bf16.nxv6bf16( %in, i64 0) ret %res } @@ -916,7 +916,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2bf16.nxv6bf16( %in, i64 2) + %res = call @llvm.vector.extract.nxv2bf16.nxv6bf16( %in, i64 2) ret %res } @@ -926,11 +926,11 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2bf16.nxv6bf16( %in, i64 4) + %res = call @llvm.vector.extract.nxv2bf16.nxv6bf16( %in, i64 4) ret %res } -declare @llvm.experimental.vector.extract.nxv2bf16.nxv6bf16(, i64) +declare @llvm.vector.extract.nxv2bf16.nxv6bf16(, i64) ; ; Extract 1/4th bf16 vector that needs promotion from legal type. 
@@ -941,7 +941,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16( %in, i64 0) + %res = call @llvm.vector.extract.nxv2bf16.nxv8bf16( %in, i64 0) ret %res } @@ -951,7 +951,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16( %in, i64 2) + %res = call @llvm.vector.extract.nxv2bf16.nxv8bf16( %in, i64 2) ret %res } @@ -961,7 +961,7 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16( %in, i64 4) + %res = call @llvm.vector.extract.nxv2bf16.nxv8bf16( %in, i64 4) ret %res } @@ -971,11 +971,11 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16( %in, i64 6) + %res = call @llvm.vector.extract.nxv2bf16.nxv8bf16( %in, i64 6) ret %res } -declare @llvm.experimental.vector.extract.nxv2bf16.nxv8bf16(, i64) +declare @llvm.vector.extract.nxv2bf16.nxv8bf16(, i64) ; ; Extract half bf16 vector that needs promotion from one that needs splitting. @@ -985,7 +985,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16( %in, i64 0) + %res = call @llvm.vector.extract.nxv4bf16.nxv16bf16( %in, i64 0) ret %res } @@ -994,7 +994,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16( %in, i64 4) + %res = call @llvm.vector.extract.nxv4bf16.nxv16bf16( %in, i64 4) ret %res } @@ -1003,7 +1003,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpklo z0.s, z1.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16( %in, i64 8) + %res = call @llvm.vector.extract.nxv4bf16.nxv16bf16( %in, i64 8) ret %res } @@ -1012,11 +1012,11 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uunpkhi z0.s, z1.h ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16( %in, i64 12) + %res = call @llvm.vector.extract.nxv4bf16.nxv16bf16( %in, i64 12) ret %res } -declare @llvm.experimental.vector.extract.nxv4bf16.nxv16bf16(, i64) +declare @llvm.vector.extract.nxv4bf16.nxv16bf16(, i64) ; @@ -1030,7 +1030,7 @@ ; CHECK-NEXT: ret %ins = insertelement poison, float %f, i32 0 %splat = shufflevector %ins, poison, zeroinitializer - %ext = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( %splat, i64 0) + %ext = call @llvm.vector.extract.nxv2f32.nxv4f32( %splat, i64 0) ret %ext } @@ -1041,7 +1041,7 @@ ; CHECK-NEXT: ret %ins = insertelement poison, float 1.0, i32 0 %splat = shufflevector %ins, poison, zeroinitializer - %ext = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( %splat, i64 0) + %ext = call @llvm.vector.extract.nxv2f32.nxv4f32( %splat, i64 0) ret %ext } @@ -1052,7 +1052,7 @@ ; CHECK-NEXT: ret %ins = insertelement poison, i32 1, i32 0 %splat = shufflevector %ins, poison, zeroinitializer - %ext = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( %splat, i64 0) + %ext = call @llvm.vector.extract.nxv4i32.nxv8i32( %splat, i64 0) ret %ext } @@ -1063,7 +1063,7 @@ ; CHECK-NEXT: ret %ins = insertelement poison, i1 1, i32 0 %splat = shufflevector %ins, poison, zeroinitializer - %ext = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %splat, i64 0) + %ext = call @llvm.vector.extract.nxv2i1.nxv16i1( %splat, i64 0) ret %ext } @@ 
-1072,9 +1072,9 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: pfalse p0.b ; CHECK-NEXT: ret - %ext = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( zeroinitializer, i64 0) + %ext = call @llvm.vector.extract.nxv2i1.nxv16i1( zeroinitializer, i64 0) ret %ext } -declare @llvm.experimental.vector.extract.nxv2f32.nxv4f32(, i64) -declare @llvm.experimental.vector.extract.nxv4i32.nxv8i32(, i64) +declare @llvm.vector.extract.nxv2f32.nxv4f32(, i64) +declare @llvm.vector.extract.nxv4i32.nxv8i32(, i64) diff --git a/llvm/test/CodeGen/AArch64/sve-extract-vector-to-predicate-store.ll b/llvm/test/CodeGen/AArch64/sve-extract-vector-to-predicate-store.ll --- a/llvm/test/CodeGen/AArch64/sve-extract-vector-to-predicate-store.ll +++ b/llvm/test/CodeGen/AArch64/sve-extract-vector-to-predicate-store.ll @@ -8,7 +8,7 @@ ; CHECK-NEXT: store %pred, * [[TMP1]] ; CHECK-NEXT: ret void %bitcast = bitcast %pred to - %extract = tail call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( %bitcast, i64 0) + %extract = tail call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( %bitcast, i64 0) store <2 x i8> %extract, <2 x i8>* %addr, align 4 ret void } @@ -19,7 +19,7 @@ ; CHECK-NEXT: store %pred, * [[TMP1]] ; CHECK-NEXT: ret void %bitcast = bitcast %pred to - %extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8( %bitcast, i64 0) + %extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8( %bitcast, i64 0) store <4 x i8> %extract, <4 x i8>* %addr, align 4 ret void } @@ -30,7 +30,7 @@ ; CHECK-NEXT: store %pred, * [[TMP1]] ; CHECK-NEXT: ret void %bitcast = bitcast %pred to - %extract = tail call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8( %bitcast, i64 0) + %extract = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8( %bitcast, i64 0) store <8 x i8> %extract, <8 x i8>* %addr, align 4 ret void } @@ -39,9 +39,9 @@ ; Check that too small of a vscale prevents optimization define void @pred_store_neg1( %pred, <4 x i8>* %addr) #0 { ; CHECK-LABEL: @pred_store_neg1( -; CHECK: call <4 x i8> @llvm.experimental.vector.extract +; CHECK: call <4 x i8> @llvm.vector.extract %bitcast = bitcast %pred to - %extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8( %bitcast, i64 0) + %extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8( %bitcast, i64 0) store <4 x i8> %extract, <4 x i8>* %addr, align 4 ret void } @@ -49,9 +49,9 @@ ; Check that too large of a vscale prevents optimization define void @pred_store_neg2( %pred, <4 x i8>* %addr) #2 { ; CHECK-LABEL: @pred_store_neg2( -; CHECK: call <4 x i8> @llvm.experimental.vector.extract +; CHECK: call <4 x i8> @llvm.vector.extract %bitcast = bitcast %pred to - %extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8( %bitcast, i64 0) + %extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8( %bitcast, i64 0) store <4 x i8> %extract, <4 x i8>* %addr, align 4 ret void } @@ -59,9 +59,9 @@ ; Check that a non-zero index prevents optimization define void @pred_store_neg3( %pred, <4 x i8>* %addr) #1 { ; CHECK-LABEL: @pred_store_neg3( -; CHECK: call <4 x i8> @llvm.experimental.vector.extract +; CHECK: call <4 x i8> @llvm.vector.extract %bitcast = bitcast %pred to - %extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8( %bitcast, i64 4) + %extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8( %bitcast, i64 4) store <4 x i8> %extract, <4 x i8>* %addr, align 4 ret void } @@ -69,16 +69,16 @@ ; Check that differing vscale min/max prevents optimization define void @pred_store_neg4( %pred, 
<4 x i8>* %addr) #3 { ; CHECK-LABEL: @pred_store_neg4( -; CHECK: call <4 x i8> @llvm.experimental.vector.extract +; CHECK: call <4 x i8> @llvm.vector.extract %bitcast = bitcast %pred to - %extract = tail call <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8( %bitcast, i64 0) + %extract = tail call <4 x i8> @llvm.vector.extract.v4i8.nxv2i8( %bitcast, i64 0) store <4 x i8> %extract, <4 x i8>* %addr, align 4 ret void } -declare <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8(, i64) -declare <4 x i8> @llvm.experimental.vector.extract.v4i8.nxv2i8(, i64) -declare <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv2i8(, i64) +declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(, i64) +declare <4 x i8> @llvm.vector.extract.v4i8.nxv2i8(, i64) +declare <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(, i64) attributes #0 = { "target-features"="+sve" vscale_range(1,1) } attributes #1 = { "target-features"="+sve" vscale_range(2,2) } diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll --- a/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll +++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll @@ -13,7 +13,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: zip2 v0.8b, v0.8b, v0.8b ; CHECK-NEXT: ret - %ret = call <4 x i8> @llvm.experimental.vector.extract.v4i8.v8i8(<8 x i8> %op, i64 4) + %ret = call <4 x i8> @llvm.vector.extract.v4i8.v8i8(<8 x i8> %op, i64 4) ret <4 x i8> %ret } @@ -24,7 +24,7 @@ ; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %ret = call <8 x i8> @llvm.experimental.vector.extract.v8i8.v16i8(<16 x i8> %op, i64 8) + %ret = call <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8> %op, i64 8) ret <8 x i8> %ret } @@ -37,7 +37,7 @@ ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret %op = load <32 x i8>, <32 x i8>* %a - %ret = call <16 x i8> @llvm.experimental.vector.extract.v16i8.v32i8(<32 x i8> %op, i64 16) + %ret = call <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8> %op, i64 16) store <16 x i8> %ret, <16 x i8>* %b ret void } @@ -60,7 +60,7 @@ ; VBITS_GE_512-NEXT: st1b { z0.b }, p0, [x1] ; VBITS_GE_512-NEXT: ret %op = load <64 x i8>, <64 x i8>* %a - %ret = call <32 x i8> @llvm.experimental.vector.extract.v32i8.v64i8(<64 x i8> %op, i64 32) + %ret = call <32 x i8> @llvm.vector.extract.v32i8.v64i8(<64 x i8> %op, i64 32) store <32 x i8> %ret, <32 x i8>* %b ret void } @@ -75,7 +75,7 @@ ; CHECK-NEXT: st1b { z0.b }, p0, [x1] ; CHECK-NEXT: ret %op = load <128 x i8>, <128 x i8>* %a - %ret = call <64 x i8> @llvm.experimental.vector.extract.v64i8.v128i8(<128 x i8> %op, i64 64) + %ret = call <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8> %op, i64 64) store <64 x i8> %ret, <64 x i8>* %b ret void } @@ -90,7 +90,7 @@ ; CHECK-NEXT: st1b { z0.b }, p0, [x1] ; CHECK-NEXT: ret %op = load <256 x i8>, <256 x i8>* %a - %ret = call <128 x i8> @llvm.experimental.vector.extract.v128i8.v256i8(<256 x i8> %op, i64 128) + %ret = call <128 x i8> @llvm.vector.extract.v128i8.v256i8(<256 x i8> %op, i64 128) store <128 x i8> %ret, <128 x i8>* %b ret void } @@ -108,7 +108,7 @@ ; CHECK-NEXT: mov v0.s[1], w9 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %ret = call <2 x i16> @llvm.experimental.vector.extract.v2i16.v4i16(<4 x i16> %op, i64 2) + %ret = call <2 x i16> @llvm.vector.extract.v2i16.v4i16(<4 x i16> %op, i64 2) ret <2 x i16> %ret } @@ -119,7 +119,7 @@ ; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-NEXT: 
// kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %ret = call <4 x i16> @llvm.experimental.vector.extract.v4i16.v8i16(<8 x i16> %op, i64 4) + %ret = call <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16> %op, i64 4) ret <4 x i16> %ret } @@ -132,7 +132,7 @@ ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret %op = load <16 x i16>, <16 x i16>* %a - %ret = call <8 x i16> @llvm.experimental.vector.extract.v8i16.v16i16(<16 x i16> %op, i64 8) + %ret = call <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16> %op, i64 8) store <8 x i16> %ret, <8 x i16>* %b ret void } @@ -155,7 +155,7 @@ ; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x1] ; VBITS_GE_512-NEXT: ret %op = load <32 x i16>, <32 x i16>* %a - %ret = call <16 x i16> @llvm.experimental.vector.extract.v16i16.v32i16(<32 x i16> %op, i64 16) + %ret = call <16 x i16> @llvm.vector.extract.v16i16.v32i16(<32 x i16> %op, i64 16) store <16 x i16> %ret, <16 x i16>* %b ret void } @@ -170,7 +170,7 @@ ; CHECK-NEXT: st1h { z0.h }, p0, [x1] ; CHECK-NEXT: ret %op = load <64 x i16>, <64 x i16>* %a - %ret = call <32 x i16> @llvm.experimental.vector.extract.v32i16.v64i16(<64 x i16> %op, i64 32) + %ret = call <32 x i16> @llvm.vector.extract.v32i16.v64i16(<64 x i16> %op, i64 32) store <32 x i16> %ret, <32 x i16>* %b ret void } @@ -185,7 +185,7 @@ ; CHECK-NEXT: st1h { z0.h }, p0, [x1] ; CHECK-NEXT: ret %op = load <128 x i16>, <128 x i16>* %a - %ret = call <64 x i16> @llvm.experimental.vector.extract.v64i16.v128i16(<128 x i16> %op, i64 64) + %ret = call <64 x i16> @llvm.vector.extract.v64i16.v128i16(<128 x i16> %op, i64 64) store <64 x i16> %ret, <64 x i16>* %b ret void } @@ -199,7 +199,7 @@ ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: dup v0.2s, v0.s[1] ; CHECK-NEXT: ret - %ret = call <1 x i32> @llvm.experimental.vector.extract.v1i32.v2i32(<2 x i32> %op, i64 1) + %ret = call <1 x i32> @llvm.vector.extract.v1i32.v2i32(<2 x i32> %op, i64 1) ret <1 x i32> %ret } @@ -210,7 +210,7 @@ ; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %ret = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<4 x i32> %op, i64 2) + %ret = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %op, i64 2) ret <2 x i32> %ret } @@ -223,7 +223,7 @@ ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret %op = load <8 x i32>, <8 x i32>* %a - %ret = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4) + %ret = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4) store <4 x i32> %ret, <4 x i32>* %b ret void } @@ -246,7 +246,7 @@ ; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x1] ; VBITS_GE_512-NEXT: ret %op = load <16 x i32>, <16 x i32>* %a - %ret = call <8 x i32> @llvm.experimental.vector.extract.v8i32.v16i32(<16 x i32> %op, i64 8) + %ret = call <8 x i32> @llvm.vector.extract.v8i32.v16i32(<16 x i32> %op, i64 8) store <8 x i32> %ret, <8 x i32>* %b ret void } @@ -261,7 +261,7 @@ ; CHECK-NEXT: st1w { z0.s }, p0, [x1] ; CHECK-NEXT: ret %op = load <32 x i32>, <32 x i32>* %a - %ret = call <16 x i32> @llvm.experimental.vector.extract.v16i32.v32i32(<32 x i32> %op, i64 16) + %ret = call <16 x i32> @llvm.vector.extract.v16i32.v32i32(<32 x i32> %op, i64 16) store <16 x i32> %ret, <16 x i32>* %b ret void } @@ -276,7 +276,7 @@ ; CHECK-NEXT: st1w { z0.s }, p0, [x1] ; CHECK-NEXT: ret %op = load <64 x i32>, <64 x i32>* %a - %ret = call <32 x i32> @llvm.experimental.vector.extract.v32i32.v64i32(<64 x i32> %op, i64 32) + %ret = call <32 x i32> @llvm.vector.extract.v32i32.v64i32(<64 
x i32> %op, i64 32) store <32 x i32> %ret, <32 x i32>* %b ret void } @@ -290,7 +290,7 @@ ; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %ret = call <1 x i64> @llvm.experimental.vector.extract.v1i64.v2i64(<2 x i64> %op, i64 1) + %ret = call <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64> %op, i64 1) ret <1 x i64> %ret } @@ -303,7 +303,7 @@ ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret %op = load <4 x i64>, <4 x i64>* %a - %ret = call <2 x i64> @llvm.experimental.vector.extract.v2i64.v4i64(<4 x i64> %op, i64 2) + %ret = call <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64> %op, i64 2) store <2 x i64> %ret, <2 x i64>* %b ret void } @@ -317,7 +317,7 @@ ; CHECK-NEXT: st1d { z0.d }, p0, [x1] ; CHECK-NEXT: ret %op = load <8 x i64>, <8 x i64>* %a - %ret = call <4 x i64> @llvm.experimental.vector.extract.v4i64.v8i64(<8 x i64> %op, i64 4) + %ret = call <4 x i64> @llvm.vector.extract.v4i64.v8i64(<8 x i64> %op, i64 4) store <4 x i64> %ret, <4 x i64>* %b ret void } @@ -335,7 +335,7 @@ ; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x1] ; VBITS_GE_256-NEXT: ret %op = load <16 x i64>, <16 x i64>* %a - %ret = call <8 x i64> @llvm.experimental.vector.extract.v8i64.v16i64(<16 x i64> %op, i64 8) + %ret = call <8 x i64> @llvm.vector.extract.v8i64.v16i64(<16 x i64> %op, i64 8) store <8 x i64> %ret, <8 x i64>* %b ret void } @@ -349,7 +349,7 @@ ; CHECK-NEXT: st1d { z0.d }, p0, [x1] ; CHECK-NEXT: ret %op = load <32 x i64>, <32 x i64>* %a - %ret = call <16 x i64> @llvm.experimental.vector.extract.v16i64.v32i64(<32 x i64> %op, i64 16) + %ret = call <16 x i64> @llvm.vector.extract.v16i64.v32i64(<32 x i64> %op, i64 16) store <16 x i64> %ret, <16 x i64>* %b ret void } @@ -363,7 +363,7 @@ ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: dup v0.2s, v0.s[1] ; CHECK-NEXT: ret - %ret = call <2 x half> @llvm.experimental.vector.extract.v2f16.v4f16(<4 x half> %op, i64 2) + %ret = call <2 x half> @llvm.vector.extract.v2f16.v4f16(<4 x half> %op, i64 2) ret <2 x half> %ret } @@ -374,7 +374,7 @@ ; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %ret = call <4 x half> @llvm.experimental.vector.extract.v4f16.v8f16(<8 x half> %op, i64 4) + %ret = call <4 x half> @llvm.vector.extract.v4f16.v8f16(<8 x half> %op, i64 4) ret <4 x half> %ret } @@ -387,7 +387,7 @@ ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret %op = load <16 x half>, <16 x half>* %a - %ret = call <8 x half> @llvm.experimental.vector.extract.v8f16.v16f16(<16 x half> %op, i64 8) + %ret = call <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half> %op, i64 8) store <8 x half> %ret, <8 x half>* %b ret void } @@ -410,7 +410,7 @@ ; VBITS_GE_512-NEXT: st1h { z0.h }, p0, [x1] ; VBITS_GE_512-NEXT: ret %op = load <32 x half>, <32 x half>* %a - %ret = call <16 x half> @llvm.experimental.vector.extract.v16f16.v32f16(<32 x half> %op, i64 16) + %ret = call <16 x half> @llvm.vector.extract.v16f16.v32f16(<32 x half> %op, i64 16) store <16 x half> %ret, <16 x half>* %b ret void } @@ -425,7 +425,7 @@ ; CHECK-NEXT: st1h { z0.h }, p0, [x1] ; CHECK-NEXT: ret %op = load <64 x half>, <64 x half>* %a - %ret = call <32 x half> @llvm.experimental.vector.extract.v32f16.v64f16(<64 x half> %op, i64 32) + %ret = call <32 x half> @llvm.vector.extract.v32f16.v64f16(<64 x half> %op, i64 32) store <32 x half> %ret, <32 x half>* %b ret void } @@ -440,7 +440,7 @@ ; CHECK-NEXT: st1h { z0.h }, p0, [x1] ; CHECK-NEXT: ret %op = load <128 x half>, <128 x 
half>* %a - %ret = call <64 x half> @llvm.experimental.vector.extract.v64f16.v128f16(<128 x half> %op, i64 64) + %ret = call <64 x half> @llvm.vector.extract.v64f16.v128f16(<128 x half> %op, i64 64) store <64 x half> %ret, <64 x half>* %b ret void } @@ -454,7 +454,7 @@ ; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0 ; CHECK-NEXT: dup v0.2s, v0.s[1] ; CHECK-NEXT: ret - %ret = call <1 x float> @llvm.experimental.vector.extract.v1f32.v2f32(<2 x float> %op, i64 1) + %ret = call <1 x float> @llvm.vector.extract.v1f32.v2f32(<2 x float> %op, i64 1) ret <1 x float> %ret } @@ -465,7 +465,7 @@ ; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %ret = call <2 x float> @llvm.experimental.vector.extract.v2f32.v4f32(<4 x float> %op, i64 2) + %ret = call <2 x float> @llvm.vector.extract.v2f32.v4f32(<4 x float> %op, i64 2) ret <2 x float> %ret } @@ -478,7 +478,7 @@ ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret %op = load <8 x float>, <8 x float>* %a - %ret = call <4 x float> @llvm.experimental.vector.extract.v4f32.v8f32(<8 x float> %op, i64 4) + %ret = call <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float> %op, i64 4) store <4 x float> %ret, <4 x float>* %b ret void } @@ -501,7 +501,7 @@ ; VBITS_GE_512-NEXT: st1w { z0.s }, p0, [x1] ; VBITS_GE_512-NEXT: ret %op = load <16 x float>, <16 x float>* %a - %ret = call <8 x float> @llvm.experimental.vector.extract.v8f32.v16f32(<16 x float> %op, i64 8) + %ret = call <8 x float> @llvm.vector.extract.v8f32.v16f32(<16 x float> %op, i64 8) store <8 x float> %ret, <8 x float>* %b ret void } @@ -516,7 +516,7 @@ ; CHECK-NEXT: st1w { z0.s }, p0, [x1] ; CHECK-NEXT: ret %op = load <32 x float>, <32 x float>* %a - %ret = call <16 x float> @llvm.experimental.vector.extract.v16f32.v32f32(<32 x float> %op, i64 16) + %ret = call <16 x float> @llvm.vector.extract.v16f32.v32f32(<32 x float> %op, i64 16) store <16 x float> %ret, <16 x float>* %b ret void } @@ -531,7 +531,7 @@ ; CHECK-NEXT: st1w { z0.s }, p0, [x1] ; CHECK-NEXT: ret %op = load <64 x float>, <64 x float>* %a - %ret = call <32 x float> @llvm.experimental.vector.extract.v32f32.v64f32(<64 x float> %op, i64 32) + %ret = call <32 x float> @llvm.vector.extract.v32f32.v64f32(<64 x float> %op, i64 32) store <32 x float> %ret, <32 x float>* %b ret void } @@ -545,7 +545,7 @@ ; CHECK-NEXT: ext v0.16b, v0.16b, v0.16b, #8 ; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0 ; CHECK-NEXT: ret - %ret = call <1 x double> @llvm.experimental.vector.extract.v1f64.v2f64(<2 x double> %op, i64 1) + %ret = call <1 x double> @llvm.vector.extract.v1f64.v2f64(<2 x double> %op, i64 1) ret <1 x double> %ret } @@ -558,7 +558,7 @@ ; CHECK-NEXT: str q0, [x1] ; CHECK-NEXT: ret %op = load <4 x double>, <4 x double>* %a - %ret = call <2 x double> @llvm.experimental.vector.extract.v2f64.v4f64(<4 x double> %op, i64 2) + %ret = call <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double> %op, i64 2) store <2 x double> %ret, <2 x double>* %b ret void } @@ -581,7 +581,7 @@ ; VBITS_GE_512-NEXT: st1d { z0.d }, p0, [x1] ; VBITS_GE_512-NEXT: ret %op = load <8 x double>, <8 x double>* %a - %ret = call <4 x double> @llvm.experimental.vector.extract.v4f64.v8f64(<8 x double> %op, i64 4) + %ret = call <4 x double> @llvm.vector.extract.v4f64.v8f64(<8 x double> %op, i64 4) store <4 x double> %ret, <4 x double>* %b ret void } @@ -596,7 +596,7 @@ ; CHECK-NEXT: st1d { z0.d }, p0, [x1] ; CHECK-NEXT: ret %op = load <16 x double>, <16 x double>* %a - %ret = call <8 x double> 
@llvm.experimental.vector.extract.v8f64.v16f64(<16 x double> %op, i64 8) + %ret = call <8 x double> @llvm.vector.extract.v8f64.v16f64(<16 x double> %op, i64 8) store <8 x double> %ret, <8 x double>* %b ret void } @@ -611,56 +611,56 @@ ; CHECK-NEXT: st1d { z0.d }, p0, [x1] ; CHECK-NEXT: ret %op = load <32 x double>, <32 x double>* %a - %ret = call <16 x double> @llvm.experimental.vector.extract.v16f64.v32f64(<32 x double> %op, i64 16) + %ret = call <16 x double> @llvm.vector.extract.v16f64.v32f64(<32 x double> %op, i64 16) store <16 x double> %ret, <16 x double>* %b ret void } -declare <4 x i8> @llvm.experimental.vector.extract.v4i8.v8i8(<8 x i8>, i64) -declare <8 x i8> @llvm.experimental.vector.extract.v8i8.v16i8(<16 x i8>, i64) -declare <16 x i8> @llvm.experimental.vector.extract.v16i8.v32i8(<32 x i8>, i64) -declare <32 x i8> @llvm.experimental.vector.extract.v32i8.v64i8(<64 x i8>, i64) -declare <64 x i8> @llvm.experimental.vector.extract.v64i8.v128i8(<128 x i8>, i64) -declare <128 x i8> @llvm.experimental.vector.extract.v128i8.v256i8(<256 x i8>, i64) - -declare <2 x i16> @llvm.experimental.vector.extract.v2i16.v4i16(<4 x i16>, i64) -declare <4 x i16> @llvm.experimental.vector.extract.v4i16.v8i16(<8 x i16>, i64) -declare <8 x i16> @llvm.experimental.vector.extract.v8i16.v16i16(<16 x i16>, i64) -declare <16 x i16> @llvm.experimental.vector.extract.v16i16.v32i16(<32 x i16>, i64) -declare <32 x i16> @llvm.experimental.vector.extract.v32i16.v64i16(<64 x i16>, i64) -declare <64 x i16> @llvm.experimental.vector.extract.v64i16.v128i16(<128 x i16>, i64) - -declare <1 x i32> @llvm.experimental.vector.extract.v1i32.v2i32(<2 x i32>, i64) -declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<4 x i32>, i64) -declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32>, i64) -declare <8 x i32> @llvm.experimental.vector.extract.v8i32.v16i32(<16 x i32>, i64) -declare <16 x i32> @llvm.experimental.vector.extract.v16i32.v32i32(<32 x i32>, i64) -declare <32 x i32> @llvm.experimental.vector.extract.v32i32.v64i32(<64 x i32>, i64) - -declare <1 x i64> @llvm.experimental.vector.extract.v1i64.v2i64(<2 x i64>, i64) -declare <2 x i64> @llvm.experimental.vector.extract.v2i64.v4i64(<4 x i64>, i64) -declare <4 x i64> @llvm.experimental.vector.extract.v4i64.v8i64(<8 x i64>, i64) -declare <8 x i64> @llvm.experimental.vector.extract.v8i64.v16i64(<16 x i64>, i64) -declare <16 x i64> @llvm.experimental.vector.extract.v16i64.v32i64(<32 x i64>, i64) - -declare <2 x half> @llvm.experimental.vector.extract.v2f16.v4f16(<4 x half>, i64) -declare <4 x half> @llvm.experimental.vector.extract.v4f16.v8f16(<8 x half>, i64) -declare <8 x half> @llvm.experimental.vector.extract.v8f16.v16f16(<16 x half>, i64) -declare <16 x half> @llvm.experimental.vector.extract.v16f16.v32f16(<32 x half>, i64) -declare <32 x half> @llvm.experimental.vector.extract.v32f16.v64f16(<64 x half>, i64) -declare <64 x half> @llvm.experimental.vector.extract.v64f16.v128f16(<128 x half>, i64) - -declare <1 x float> @llvm.experimental.vector.extract.v1f32.v2f32(<2 x float>, i64) -declare <2 x float> @llvm.experimental.vector.extract.v2f32.v4f32(<4 x float>, i64) -declare <4 x float> @llvm.experimental.vector.extract.v4f32.v8f32(<8 x float>, i64) -declare <8 x float> @llvm.experimental.vector.extract.v8f32.v16f32(<16 x float>, i64) -declare <16 x float> @llvm.experimental.vector.extract.v16f32.v32f32(<32 x float>, i64) -declare <32 x float> @llvm.experimental.vector.extract.v32f32.v64f32(<64 x float>, i64) - -declare <1 x double> 
@llvm.experimental.vector.extract.v1f64.v2f64(<2 x double>, i64) -declare <2 x double> @llvm.experimental.vector.extract.v2f64.v4f64(<4 x double>, i64) -declare <4 x double> @llvm.experimental.vector.extract.v4f64.v8f64(<8 x double>, i64) -declare <8 x double> @llvm.experimental.vector.extract.v8f64.v16f64(<16 x double>, i64) -declare <16 x double> @llvm.experimental.vector.extract.v16f64.v32f64(<32 x double>, i64) +declare <4 x i8> @llvm.vector.extract.v4i8.v8i8(<8 x i8>, i64) +declare <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8>, i64) +declare <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8>, i64) +declare <32 x i8> @llvm.vector.extract.v32i8.v64i8(<64 x i8>, i64) +declare <64 x i8> @llvm.vector.extract.v64i8.v128i8(<128 x i8>, i64) +declare <128 x i8> @llvm.vector.extract.v128i8.v256i8(<256 x i8>, i64) + +declare <2 x i16> @llvm.vector.extract.v2i16.v4i16(<4 x i16>, i64) +declare <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16>, i64) +declare <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16>, i64) +declare <16 x i16> @llvm.vector.extract.v16i16.v32i16(<32 x i16>, i64) +declare <32 x i16> @llvm.vector.extract.v32i16.v64i16(<64 x i16>, i64) +declare <64 x i16> @llvm.vector.extract.v64i16.v128i16(<128 x i16>, i64) + +declare <1 x i32> @llvm.vector.extract.v1i32.v2i32(<2 x i32>, i64) +declare <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32>, i64) +declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32>, i64) +declare <8 x i32> @llvm.vector.extract.v8i32.v16i32(<16 x i32>, i64) +declare <16 x i32> @llvm.vector.extract.v16i32.v32i32(<32 x i32>, i64) +declare <32 x i32> @llvm.vector.extract.v32i32.v64i32(<64 x i32>, i64) + +declare <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64>, i64) +declare <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64>, i64) +declare <4 x i64> @llvm.vector.extract.v4i64.v8i64(<8 x i64>, i64) +declare <8 x i64> @llvm.vector.extract.v8i64.v16i64(<16 x i64>, i64) +declare <16 x i64> @llvm.vector.extract.v16i64.v32i64(<32 x i64>, i64) + +declare <2 x half> @llvm.vector.extract.v2f16.v4f16(<4 x half>, i64) +declare <4 x half> @llvm.vector.extract.v4f16.v8f16(<8 x half>, i64) +declare <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half>, i64) +declare <16 x half> @llvm.vector.extract.v16f16.v32f16(<32 x half>, i64) +declare <32 x half> @llvm.vector.extract.v32f16.v64f16(<64 x half>, i64) +declare <64 x half> @llvm.vector.extract.v64f16.v128f16(<128 x half>, i64) + +declare <1 x float> @llvm.vector.extract.v1f32.v2f32(<2 x float>, i64) +declare <2 x float> @llvm.vector.extract.v2f32.v4f32(<4 x float>, i64) +declare <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float>, i64) +declare <8 x float> @llvm.vector.extract.v8f32.v16f32(<16 x float>, i64) +declare <16 x float> @llvm.vector.extract.v16f32.v32f32(<32 x float>, i64) +declare <32 x float> @llvm.vector.extract.v32f32.v64f32(<64 x float>, i64) + +declare <1 x double> @llvm.vector.extract.v1f64.v2f64(<2 x double>, i64) +declare <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double>, i64) +declare <4 x double> @llvm.vector.extract.v4f64.v8f64(<8 x double>, i64) +declare <8 x double> @llvm.vector.extract.v8f64.v16f64(<16 x double>, i64) +declare <16 x double> @llvm.vector.extract.v16f64.v32f64(<32 x double>, i64) attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector-to-predicate-load.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector-to-predicate-load.ll --- a/llvm/test/CodeGen/AArch64/sve-insert-vector-to-predicate-load.ll +++ 
b/llvm/test/CodeGen/AArch64/sve-insert-vector-to-predicate-load.ll @@ -8,7 +8,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = load , * [[TMP1]] ; CHECK-NEXT: ret [[TMP2]] %load = load <2 x i8>, <2 x i8>* %addr, align 4 - %insert = tail call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> %load, i64 0) + %insert = tail call @llvm.vector.insert.nxv2i8.v2i8( undef, <2 x i8> %load, i64 0) %ret = bitcast %insert to ret %ret } @@ -19,7 +19,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = load , * [[TMP1]] ; CHECK-NEXT: ret [[TMP2]] %load = load <4 x i8>, <4 x i8>* %addr, align 4 - %insert = tail call @llvm.experimental.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 0) + %insert = tail call @llvm.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 0) %ret = bitcast %insert to ret %ret } @@ -30,7 +30,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = load , * [[TMP1]] ; CHECK-NEXT: ret [[TMP2]] %load = load <8 x i8>, <8 x i8>* %addr, align 4 - %insert = tail call @llvm.experimental.vector.insert.nxv2i8.v8i8( undef, <8 x i8> %load, i64 0) + %insert = tail call @llvm.vector.insert.nxv2i8.v8i8( undef, <8 x i8> %load, i64 0) %ret = bitcast %insert to ret %ret } @@ -49,7 +49,7 @@ br label %bb1 bb1: - %insert = tail call @llvm.experimental.vector.insert.nxv2i8.v2i8( undef, <2 x i8> %load, i64 0) + %insert = tail call @llvm.vector.insert.nxv2i8.v2i8( undef, <2 x i8> %load, i64 0) %ret = bitcast %insert to ret %ret } @@ -57,9 +57,9 @@ ; Check that too small of a vscale prevents optimization define @pred_load_neg1(<4 x i8>* %addr) #0 { ; CHECK-LABEL: @pred_load_neg1( -; CHECK: call @llvm.experimental.vector.insert +; CHECK: call @llvm.vector.insert %load = load <4 x i8>, <4 x i8>* %addr, align 4 - %insert = tail call @llvm.experimental.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 0) + %insert = tail call @llvm.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 0) %ret = bitcast %insert to ret %ret } @@ -67,9 +67,9 @@ ; Check that too large of a vscale prevents optimization define @pred_load_neg2(<4 x i8>* %addr) #2 { ; CHECK-LABEL: @pred_load_neg2( -; CHECK: call @llvm.experimental.vector.insert +; CHECK: call @llvm.vector.insert %load = load <4 x i8>, <4 x i8>* %addr, align 4 - %insert = tail call @llvm.experimental.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 0) + %insert = tail call @llvm.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 0) %ret = bitcast %insert to ret %ret } @@ -77,9 +77,9 @@ ; Check that a non-zero index prevents optimization define @pred_load_neg3(<4 x i8>* %addr) #1 { ; CHECK-LABEL: @pred_load_neg3( -; CHECK: call @llvm.experimental.vector.insert +; CHECK: call @llvm.vector.insert %load = load <4 x i8>, <4 x i8>* %addr, align 4 - %insert = tail call @llvm.experimental.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 4) + %insert = tail call @llvm.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 4) %ret = bitcast %insert to ret %ret } @@ -87,9 +87,9 @@ ; Check that differing vscale min/max prevents optimization define @pred_load_neg4(<4 x i8>* %addr) #3 { ; CHECK-LABEL: @pred_load_neg4( -; CHECK: call @llvm.experimental.vector.insert +; CHECK: call @llvm.vector.insert %load = load <4 x i8>, <4 x i8>* %addr, align 4 - %insert = tail call @llvm.experimental.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 0) + %insert = tail call @llvm.vector.insert.nxv2i8.v4i8( undef, <4 x i8> %load, i64 0) %ret = bitcast %insert to ret %ret } @@ -97,16 +97,16 @@ ; Check that insertion into a non-undef vector prevents optimization define @pred_load_neg5(<4 x i8>* %addr, %passthru) #1 
{ ; CHECK-LABEL: @pred_load_neg5( -; CHECK: call @llvm.experimental.vector.insert +; CHECK: call @llvm.vector.insert %load = load <4 x i8>, <4 x i8>* %addr, align 4 - %insert = tail call @llvm.experimental.vector.insert.nxv2i8.v4i8( %passthru, <4 x i8> %load, i64 0) + %insert = tail call @llvm.vector.insert.nxv2i8.v4i8( %passthru, <4 x i8> %load, i64 0) %ret = bitcast %insert to ret %ret } -declare @llvm.experimental.vector.insert.nxv2i8.v2i8(, <2 x i8>, i64) -declare @llvm.experimental.vector.insert.nxv2i8.v4i8(, <4 x i8>, i64) -declare @llvm.experimental.vector.insert.nxv2i8.v8i8(, <8 x i8>, i64) +declare @llvm.vector.insert.nxv2i8.v2i8(, <2 x i8>, i64) +declare @llvm.vector.insert.nxv2i8.v4i8(, <4 x i8>, i64) +declare @llvm.vector.insert.nxv2i8.v8i8(, <8 x i8>, i64) attributes #0 = { "target-features"="+sve" vscale_range(1,1) } attributes #1 = { "target-features"="+sve" vscale_range(2,2) } diff --git a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll --- a/llvm/test/CodeGen/AArch64/sve-insert-vector.ll +++ b/llvm/test/CodeGen/AArch64/sve-insert-vector.ll @@ -8,7 +8,7 @@ ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z0.d, p0/m, z1.d ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.insert.nxv2i64.v2i64( %vec, <2 x i64> %subvec, i64 0) + %retval = call @llvm.vector.insert.nxv2i64.v2i64( %vec, <2 x i64> %subvec, i64 0) ret %retval } @@ -31,7 +31,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.insert.nxv2i64.v2i64( %vec, <2 x i64> %subvec, i64 2) + %retval = call @llvm.vector.insert.nxv2i64.v2i64( %vec, <2 x i64> %subvec, i64 2) ret %retval } @@ -42,7 +42,7 @@ ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z0.s, p0/m, z1.s ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.insert.nxv4i32.v4i32( %vec, <4 x i32> %subvec, i64 0) + %retval = call @llvm.vector.insert.nxv4i32.v4i32( %vec, <4 x i32> %subvec, i64 0) ret %retval } @@ -65,7 +65,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.insert.nxv4i32.v4i32( %vec, <4 x i32> %subvec, i64 4) + %retval = call @llvm.vector.insert.nxv4i32.v4i32( %vec, <4 x i32> %subvec, i64 4) ret %retval } @@ -76,7 +76,7 @@ ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z0.h, p0/m, z1.h ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.insert.nxv8i16.v8i16( %vec, <8 x i16> %subvec, i64 0) + %retval = call @llvm.vector.insert.nxv8i16.v8i16( %vec, <8 x i16> %subvec, i64 0) ret %retval } @@ -99,7 +99,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.insert.nxv8i16.v8i16( %vec, <8 x i16> %subvec, i64 8) + %retval = call @llvm.vector.insert.nxv8i16.v8i16( %vec, <8 x i16> %subvec, i64 8) ret %retval } @@ -110,7 +110,7 @@ ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z0.b, p0/m, z1.b ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.insert.nxv16i8.v16i8( %vec, <16 x i8> %subvec, i64 0) + %retval = call @llvm.vector.insert.nxv16i8.v16i8( %vec, <16 x i8> %subvec, i64 0) ret %retval } @@ -132,7 +132,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.insert.nxv16i8.v16i8( %vec, <16 x i8> 
%subvec, i64 16) + %retval = call @llvm.vector.insert.nxv16i8.v16i8( %vec, <16 x i8> %subvec, i64 16) ret %retval } @@ -152,8 +152,8 @@ ; CHECK-NEXT: st1d { z1.d }, p0, [x0, #1, mul vl] ; CHECK-NEXT: st1d { z0.d }, p0, [x0] ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) - %v = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( %v0, %sv1, i64 8) + %v0 = call @llvm.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) + %v = call @llvm.vector.insert.nxv8i64.nxv16i64( %v0, %sv1, i64 8) store %v, * %out ret void } @@ -167,7 +167,7 @@ ; CHECK-NEXT: st1d { z1.d }, p0, [x0, #1, mul vl] ; CHECK-NEXT: st1d { z0.d }, p0, [x0] ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) + %v = call @llvm.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) store %v, * %out ret void } @@ -181,7 +181,7 @@ ; CHECK-NEXT: st1d { z1.d }, p0, [x0, #5, mul vl] ; CHECK-NEXT: st1d { z0.d }, p0, [x0, #4, mul vl] ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 8) + %v = call @llvm.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 8) store %v, * %out ret void } @@ -212,8 +212,8 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: .cfi_restore w29 ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv0, i64 0) - %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( %v0, <2 x i64> %sv1, i64 4) + %v0 = call @llvm.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv0, i64 0) + %v = call @llvm.vector.insert.v2i64.nxv16i64( %v0, <2 x i64> %sv1, i64 4) store %v, * %out ret void } @@ -226,7 +226,7 @@ ; CHECK-NEXT: st1d { z0.d }, p0, [x1] ; CHECK-NEXT: ret %sv = load <2 x i64>, <2 x i64>* %psv - %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 0) + %v = call @llvm.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 0) store %v, * %out ret void } @@ -253,7 +253,7 @@ ; CHECK-NEXT: .cfi_restore w29 ; CHECK-NEXT: ret %sv = load <2 x i64>, <2 x i64>* %psv - %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 2) + %v = call @llvm.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 2) store %v, * %out ret void } @@ -269,7 +269,7 @@ entry: %0 = insertelement undef, i32 1, i32 0 %subvec = shufflevector %0, undef, zeroinitializer - %retval = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, %subvec, i64 0) + %retval = call @llvm.vector.insert.nxv4i32.nxv1i32( undef, %subvec, i64 0) ret %retval } @@ -281,7 +281,7 @@ entry: %0 = insertelement undef, i16 1, i32 0 %subvec = shufflevector %0, undef, zeroinitializer - %retval = call @llvm.experimental.vector.insert.nxv6i16.nxv1i16( undef, %subvec, i64 0) + %retval = call @llvm.vector.insert.nxv6i16.nxv1i16( undef, %subvec, i64 0) ret %retval } @@ -291,7 +291,7 @@ ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: ret entry: - %retval = call @llvm.experimental.vector.insert.nxv4f32.nxv1f32( undef, %subvec, i64 0) + %retval = call @llvm.vector.insert.nxv4f32.nxv1f32( undef, %subvec, i64 0) ret %retval } @@ -305,7 +305,7 @@ ; CHECK-NEXT: uzp1 z1.s, z2.s, z1.s ; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h ; CHECK-NEXT: ret - %r = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( %vec, %in, i64 2) + %r = call @llvm.vector.insert.nxv8i16.nxv2i16( %vec, %in, i64 2) ret %r } @@ -315,7 +315,7 @@ ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s ; CHECK-NEXT: ret - %v0 = call 
@llvm.experimental.vector.insert.nxv4f16.nxv2f16( %sv0, %sv1, i64 0) + %v0 = call @llvm.vector.insert.nxv4f16.nxv2f16( %sv0, %sv1, i64 0) ret %v0 } @@ -325,7 +325,7 @@ ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv4f16.nxv2f16( %sv0, %sv1, i64 2) + %v0 = call @llvm.vector.insert.nxv4f16.nxv2f16( %sv0, %sv1, i64 2) ret %v0 } @@ -343,7 +343,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %r = call @llvm.experimental.vector.insert.nxv8f16.nxv2f16( %vec, %in, i64 2) + %r = call @llvm.vector.insert.nxv8f16.nxv2f16( %vec, %in, i64 2) ret %r } @@ -353,7 +353,7 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( %sv0, %sv1, i64 0) + %v0 = call @llvm.vector.insert.nxv8f16.nxv4f16( %sv0, %sv1, i64 0) ret %v0 } @@ -363,7 +363,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( %sv0, %sv1, i64 4) + %v0 = call @llvm.vector.insert.nxv8f16.nxv4f16( %sv0, %sv1, i64 4) ret %v0 } @@ -388,7 +388,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %retval = call @llvm.experimental.vector.insert.nxv2i64.v2i64( %vec, <2 x i64> %subvec, i64 2) + %retval = call @llvm.vector.insert.nxv2i64.v2i64( %vec, <2 x i64> %subvec, i64 2) ret %retval } @@ -413,7 +413,7 @@ ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret %subvec = load <4 x i64>, <4 x i64>* %ptr - %retval = call @llvm.experimental.vector.insert.nxv2i64.v4i64( %vec, <4 x i64> %subvec, i64 4) + %retval = call @llvm.vector.insert.nxv2i64.v4i64( %vec, <4 x i64> %subvec, i64 4) ret %retval } @@ -426,7 +426,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv3i32.nxv2i32( undef, %sv0, i64 0) + %v0 = call @llvm.vector.insert.nxv3i32.nxv2i32( undef, %sv0, i64 0) ret %v0 } @@ -437,7 +437,7 @@ ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv3i32.nxv2i32( %sv0, %sv1, i64 0) + %v0 = call @llvm.vector.insert.nxv3i32.nxv2i32( %sv0, %sv1, i64 0) ret %v0 } @@ -446,7 +446,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uzp1 z0.s, z0.s, z0.s ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv3f32.nxv2f32( undef, %sv0, i64 0) + %v0 = call @llvm.vector.insert.nxv3f32.nxv2f32( undef, %sv0, i64 0) ret %v0 } @@ -456,7 +456,7 @@ ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( %sv0, %sv1, i64 0) + %v0 = call @llvm.vector.insert.nxv4f32.nxv2f32( %sv0, %sv1, i64 0) ret %v0 } @@ -466,7 +466,7 @@ ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( %sv0, %sv1, i64 2) + %v0 = call @llvm.vector.insert.nxv4f32.nxv2f32( %sv0, %sv1, i64 2) ret %v0 } @@ -482,8 +482,8 @@ ; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv6i32.nxv2i32( undef, %sv0, i64 0) - %v1 = call @llvm.experimental.vector.insert.nxv6i32.nxv2i32( %v0, %sv1, i64 2) + %v0 = call @llvm.vector.insert.nxv6i32.nxv2i32( undef, 
%sv0, i64 0) + %v1 = call @llvm.vector.insert.nxv6i32.nxv2i32( %v0, %sv1, i64 2) ret %v1 } @@ -492,7 +492,7 @@ ; CHECK-LABEL: insert_nxv6i32_nxv3i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv6i32.nxv3i32( undef, %sv0, i64 0) + %v0 = call @llvm.vector.insert.nxv6i32.nxv3i32( undef, %sv0, i64 0) ret %v0 } @@ -500,9 +500,9 @@ ; CHECK-LABEL: insert_nxv12i32_nxv4i32: ; CHECK: // %bb.0: ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv4i32.nxv12i32( undef, %sv0, i64 0) - %v1 = call @llvm.experimental.vector.insert.nxv4i32.nxv12i32( %v0, %sv1, i64 4) - %v2 = call @llvm.experimental.vector.insert.nxv4i32.nxv12i32( %v1, %sv2, i64 8) + %v0 = call @llvm.vector.insert.nxv4i32.nxv12i32( undef, %sv0, i64 0) + %v1 = call @llvm.vector.insert.nxv4i32.nxv12i32( %v0, %sv1, i64 4) + %v2 = call @llvm.vector.insert.nxv4i32.nxv12i32( %v1, %sv2, i64 8) ret %v2 } @@ -511,7 +511,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv2bf16.nxv2bf16( %sv0, %sv1, i64 0) + %v0 = call @llvm.vector.insert.nxv2bf16.nxv2bf16( %sv0, %sv1, i64 0) ret %v0 } @@ -520,7 +520,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv4bf16.nxv4bf16( %sv0, %sv1, i64 0) + %v0 = call @llvm.vector.insert.nxv4bf16.nxv4bf16( %sv0, %sv1, i64 0) ret %v0 } @@ -537,7 +537,7 @@ ; CHECK-NEXT: addvl sp, sp, #1 ; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv4bf16.v4bf16( %sv0, <4 x bfloat> %v1, i64 0) + %v0 = call @llvm.vector.insert.nxv4bf16.v4bf16( %sv0, <4 x bfloat> %v1, i64 0) ret %v0 } @@ -546,7 +546,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv8bf16.nxv8bf16( %sv0, %sv1, i64 0) + %v0 = call @llvm.vector.insert.nxv8bf16.nxv8bf16( %sv0, %sv1, i64 0) ret %v0 } @@ -557,7 +557,7 @@ ; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1 ; CHECK-NEXT: mov z0.h, p0/m, z1.h ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( %sv0, <8 x bfloat> %v1, i64 0) + %v0 = call @llvm.vector.insert.nxv8bf16.v8bf16( %sv0, <8 x bfloat> %v1, i64 0) ret %v0 } @@ -567,7 +567,7 @@ ; CHECK-NEXT: uunpkhi z0.s, z0.h ; CHECK-NEXT: uzp1 z0.h, z1.h, z0.h ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv8bf16.nxv4bf16( %sv0, %sv1, i64 0) + %v0 = call @llvm.vector.insert.nxv8bf16.nxv4bf16( %sv0, %sv1, i64 0) ret %v0 } @@ -577,7 +577,7 @@ ; CHECK-NEXT: uunpklo z0.s, z0.h ; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv8bf16.nxv4bf16( %sv0, %sv1, i64 4) + %v0 = call @llvm.vector.insert.nxv8bf16.nxv4bf16( %sv0, %sv1, i64 4) ret %v0 } @@ -587,7 +587,7 @@ ; CHECK-NEXT: uunpkhi z0.d, z0.s ; CHECK-NEXT: uzp1 z0.s, z1.s, z0.s ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv4bf16.nxv2bf16( %sv0, %sv1, i64 0) + %v0 = call @llvm.vector.insert.nxv4bf16.nxv2bf16( %sv0, %sv1, i64 0) ret %v0 } @@ -597,7 +597,7 @@ ; CHECK-NEXT: uunpklo z0.d, z0.s ; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv4bf16.nxv2bf16( %sv0, %sv1, i64 2) + %v0 = call @llvm.vector.insert.nxv4bf16.nxv2bf16( %sv0, %sv1, i64 2) ret %v0 } @@ -608,7 +608,7 @@ ; CHECK-NEXT: punpkhi p0.h, p0.b ; CHECK-NEXT: uzp1 p0.b, p1.b, p0.b ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nx16i1.nxv8i1( %vec, %sv, 
i64 0) + %v0 = call @llvm.vector.insert.nx16i1.nxv8i1( %vec, %sv, i64 0) ret %v0 } @@ -618,7 +618,7 @@ ; CHECK-NEXT: punpklo p0.h, p0.b ; CHECK-NEXT: uzp1 p0.b, p0.b, p1.b ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nx16i1.nxv8i1( %vec, %sv, i64 8) + %v0 = call @llvm.vector.insert.nx16i1.nxv8i1( %vec, %sv, i64 8) ret %v0 } @@ -632,7 +632,7 @@ ; CHECK-NEXT: uzp1 p1.h, p1.h, p2.h ; CHECK-NEXT: uzp1 p0.b, p1.b, p0.b ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nx16i1.nxv4i1( %vec, %sv, i64 0) + %v0 = call @llvm.vector.insert.nx16i1.nxv4i1( %vec, %sv, i64 0) ret %v0 } @@ -645,7 +645,7 @@ ; CHECK-NEXT: uzp1 p1.h, p2.h, p1.h ; CHECK-NEXT: uzp1 p0.b, p0.b, p1.b ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nx16i1.nxv4i1( %vec, %sv, i64 12) + %v0 = call @llvm.vector.insert.nx16i1.nxv4i1( %vec, %sv, i64 12) ret %v0 } @@ -657,7 +657,7 @@ ; CHECK-NEXT: uzp1 p0.h, p0.h, p1.h ; CHECK-NEXT: uzp1 p0.b, p0.b, p1.b ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nx16i1.nxv4i1( zeroinitializer, %sv, i64 0) + %v0 = call @llvm.vector.insert.nx16i1.nxv4i1( zeroinitializer, %sv, i64 0) ret %v0 } @@ -667,7 +667,7 @@ ; CHECK-NEXT: uzp1 p0.h, p0.h, p0.h ; CHECK-NEXT: uzp1 p0.b, p0.b, p0.b ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nx16i1.nxv4i1( poison, %sv, i64 0) + %v0 = call @llvm.vector.insert.nx16i1.nxv4i1( poison, %sv, i64 0) ret %v0 } @@ -677,7 +677,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.d ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv2i1.v8i1 ( undef, <8 x i1> , i64 0) + %v0 = call @llvm.vector.insert.nxv2i1.v8i1 ( undef, <8 x i1> , i64 0) ret %v0 } @@ -686,7 +686,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.s ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv4i1.v16i1 ( undef, <16 x i1> , i64 0) + %v0 = call @llvm.vector.insert.nxv4i1.v16i1 ( undef, <16 x i1> , i64 0) ret %v0 } @@ -695,7 +695,7 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.h ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv8i1.v32i1 ( undef, <32 x i1> , i64 0) + %v0 = call @llvm.vector.insert.nxv8i1.v32i1 ( undef, <32 x i1> , i64 0) ret %v0 } @@ -704,49 +704,49 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: ptrue p0.b ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv16i1.v64i1 ( undef, <64 x i1> , i64 0) + %v0 = call @llvm.vector.insert.nxv16i1.v64i1 ( undef, <64 x i1> , i64 0) ret %v0 } attributes #0 = { vscale_range(2,2) } -declare @llvm.experimental.vector.insert.nxv16i8.v16i8(, <16 x i8>, i64) - -declare @llvm.experimental.vector.insert.nxv6i16.nxv1i16(, , i64) -declare @llvm.experimental.vector.insert.nxv8i16.nxv2i16(, , i64) -declare @llvm.experimental.vector.insert.nxv8i16.v8i16(, <8 x i16>, i64) - -declare @llvm.experimental.vector.insert.nxv3i32.nxv2i32(, , i64) -declare @llvm.experimental.vector.insert.nxv4i32.nxv1i32(, , i64) -declare @llvm.experimental.vector.insert.nxv4i32.v4i32(, <4 x i32>, i64) -declare @llvm.experimental.vector.insert.nxv4i32.nxv12i32(, , i64) -declare @llvm.experimental.vector.insert.nxv6i32.nxv2i32(, , i64) -declare @llvm.experimental.vector.insert.nxv6i32.nxv3i32(, , i64) - -declare @llvm.experimental.vector.insert.nxv2bf16.nxv2bf16(, , i64) -declare @llvm.experimental.vector.insert.nxv4bf16.nxv2bf16(, , i64) -declare @llvm.experimental.vector.insert.nxv4bf16.nxv4bf16(, , i64) -declare @llvm.experimental.vector.insert.nxv4bf16.v4bf16(, <4 x bfloat>, i64) -declare @llvm.experimental.vector.insert.nxv8bf16.nxv8bf16(, , i64) -declare 
@llvm.experimental.vector.insert.nxv8bf16.nxv4bf16(, , i64) -declare @llvm.experimental.vector.insert.nxv8bf16.v8bf16(, <8 x bfloat>, i64) - -declare @llvm.experimental.vector.insert.nxv2i64.v2i64(, <2 x i64>, i64) -declare @llvm.experimental.vector.insert.nxv2i64.v4i64(, <4 x i64>, i64) -declare @llvm.experimental.vector.insert.nxv8i64.nxv16i64(, , i64) -declare @llvm.experimental.vector.insert.v2i64.nxv16i64(, <2 x i64>, i64) - -declare @llvm.experimental.vector.insert.nxv4f16.nxv2f16(, , i64) -declare @llvm.experimental.vector.insert.nxv8f16.nxv2f16(, , i64) -declare @llvm.experimental.vector.insert.nxv8f16.nxv4f16(, , i64) - -declare @llvm.experimental.vector.insert.nxv3f32.nxv2f32(, , i64) -declare @llvm.experimental.vector.insert.nxv4f32.nxv1f32(, , i64) -declare @llvm.experimental.vector.insert.nxv4f32.nxv2f32(, , i64) - -declare @llvm.experimental.vector.insert.nxv2i1.v8i1(, <8 x i1>, i64) -declare @llvm.experimental.vector.insert.nxv4i1.v16i1(, <16 x i1>, i64) -declare @llvm.experimental.vector.insert.nxv8i1.v32i1(, <32 x i1>, i64) -declare @llvm.experimental.vector.insert.nx16i1.nxv4i1(, , i64) -declare @llvm.experimental.vector.insert.nx16i1.nxv8i1(, , i64) -declare @llvm.experimental.vector.insert.nxv16i1.v64i1(, <64 x i1>, i64) +declare @llvm.vector.insert.nxv16i8.v16i8(, <16 x i8>, i64) + +declare @llvm.vector.insert.nxv6i16.nxv1i16(, , i64) +declare @llvm.vector.insert.nxv8i16.nxv2i16(, , i64) +declare @llvm.vector.insert.nxv8i16.v8i16(, <8 x i16>, i64) + +declare @llvm.vector.insert.nxv3i32.nxv2i32(, , i64) +declare @llvm.vector.insert.nxv4i32.nxv1i32(, , i64) +declare @llvm.vector.insert.nxv4i32.v4i32(, <4 x i32>, i64) +declare @llvm.vector.insert.nxv4i32.nxv12i32(, , i64) +declare @llvm.vector.insert.nxv6i32.nxv2i32(, , i64) +declare @llvm.vector.insert.nxv6i32.nxv3i32(, , i64) + +declare @llvm.vector.insert.nxv2bf16.nxv2bf16(, , i64) +declare @llvm.vector.insert.nxv4bf16.nxv2bf16(, , i64) +declare @llvm.vector.insert.nxv4bf16.nxv4bf16(, , i64) +declare @llvm.vector.insert.nxv4bf16.v4bf16(, <4 x bfloat>, i64) +declare @llvm.vector.insert.nxv8bf16.nxv8bf16(, , i64) +declare @llvm.vector.insert.nxv8bf16.nxv4bf16(, , i64) +declare @llvm.vector.insert.nxv8bf16.v8bf16(, <8 x bfloat>, i64) + +declare @llvm.vector.insert.nxv2i64.v2i64(, <2 x i64>, i64) +declare @llvm.vector.insert.nxv2i64.v4i64(, <4 x i64>, i64) +declare @llvm.vector.insert.nxv8i64.nxv16i64(, , i64) +declare @llvm.vector.insert.v2i64.nxv16i64(, <2 x i64>, i64) + +declare @llvm.vector.insert.nxv4f16.nxv2f16(, , i64) +declare @llvm.vector.insert.nxv8f16.nxv2f16(, , i64) +declare @llvm.vector.insert.nxv8f16.nxv4f16(, , i64) + +declare @llvm.vector.insert.nxv3f32.nxv2f32(, , i64) +declare @llvm.vector.insert.nxv4f32.nxv1f32(, , i64) +declare @llvm.vector.insert.nxv4f32.nxv2f32(, , i64) + +declare @llvm.vector.insert.nxv2i1.v8i1(, <8 x i1>, i64) +declare @llvm.vector.insert.nxv4i1.v16i1(, <16 x i1>, i64) +declare @llvm.vector.insert.nxv8i1.v32i1(, <32 x i1>, i64) +declare @llvm.vector.insert.nx16i1.nxv4i1(, , i64) +declare @llvm.vector.insert.nx16i1.nxv8i1(, , i64) +declare @llvm.vector.insert.nxv16i1.v64i1(, <64 x i1>, i64) diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll --- a/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll +++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll @@ -588,7 +588,7 @@ ; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI49_0] ; CHECK-NEXT: mov z0.q, q0 ; CHECK-NEXT: ret - %1 = tail call fast 
@llvm.experimental.vector.insert.nxv2f64.v2f64( undef, <2 x double> , i64 0) + %1 = tail call fast @llvm.vector.insert.nxv2f64.v2f64( undef, <2 x double> , i64 0) %2 = tail call fast @llvm.aarch64.sve.dupq.lane.nxv2f64( %1, i64 0) ret %2 } @@ -600,7 +600,7 @@ ; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI50_0] ; CHECK-NEXT: mov z0.q, q0 ; CHECK-NEXT: ret - %1 = tail call fast @llvm.experimental.vector.insert.nxv4f32.v4f32( undef, <4 x float> , i64 0) + %1 = tail call fast @llvm.vector.insert.nxv4f32.v4f32( undef, <4 x float> , i64 0) %2 = tail call fast @llvm.aarch64.sve.dupq.lane.nxv4f32( %1, i64 0) ret %2 } @@ -612,7 +612,7 @@ ; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI51_0] ; CHECK-NEXT: mov z0.q, q0 ; CHECK-NEXT: ret - %1 = tail call fast @llvm.experimental.vector.insert.nxv8f16.v8f16( undef, <8 x half> , i64 0) + %1 = tail call fast @llvm.vector.insert.nxv8f16.v8f16( undef, <8 x half> , i64 0) %2 = tail call fast @llvm.aarch64.sve.dupq.lane.nxv8f16( %1, i64 0) ret %2 } @@ -624,7 +624,7 @@ ; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI52_0] ; CHECK-NEXT: mov z0.q, q0 ; CHECK-NEXT: ret - %1 = call @llvm.experimental.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> , i64 0) + %1 = call @llvm.vector.insert.nxv8bf16.v8bf16( undef, <8 x bfloat> , i64 0) %2 = call @llvm.aarch64.sve.dupq.lane.nxv8bf16( %1, i64 0) ret %2 } @@ -636,7 +636,7 @@ ; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI53_0] ; CHECK-NEXT: mov z0.q, q0 ; CHECK-NEXT: ret - %1 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) + %1 = tail call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) %2 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %1, i64 0) ret %2 } @@ -648,7 +648,7 @@ ; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI54_0] ; CHECK-NEXT: mov z0.q, q0 ; CHECK-NEXT: ret - %1 = tail call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> , i64 0) + %1 = tail call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> , i64 0) %2 = tail call @llvm.aarch64.sve.dupq.lane.nxv4i32( %1, i64 0) ret %2 } @@ -660,7 +660,7 @@ ; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI55_0] ; CHECK-NEXT: mov z0.q, q0 ; CHECK-NEXT: ret - %1 = tail call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> , i64 0) + %1 = tail call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> , i64 0) %2 = tail call @llvm.aarch64.sve.dupq.lane.nxv8i16( %1, i64 0) ret %2 } @@ -672,7 +672,7 @@ ; CHECK-NEXT: ldr q0, [x8, :lo12:.LCPI56_0] ; CHECK-NEXT: mov z0.q, q0 ; CHECK-NEXT: ret - %1 = tail call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> , i64 0) + %1 = tail call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> , i64 0) %2 = tail call @llvm.aarch64.sve.dupq.lane.nxv16i8( %1, i64 0) ret %2 } @@ -2559,14 +2559,14 @@ declare @llvm.aarch64.sve.zip2.nxv4f32(, ) declare @llvm.aarch64.sve.zip2.nxv2f64(, ) -declare @llvm.experimental.vector.insert.nxv2f64.v2f64(, <2 x double>, i64) -declare @llvm.experimental.vector.insert.nxv4f32.v4f32(, <4 x float>, i64) -declare @llvm.experimental.vector.insert.nxv8f16.v8f16(, <8 x half>, i64) -declare @llvm.experimental.vector.insert.nxv2i64.v2i64(, <2 x i64>, i64) -declare @llvm.experimental.vector.insert.nxv4i32.v4i32(, <4 x i32>, i64) -declare @llvm.experimental.vector.insert.nxv8i16.v8i16(, <8 x i16>, i64) -declare @llvm.experimental.vector.insert.nxv16i8.v16i8(, <16 x i8>, i64) -declare @llvm.experimental.vector.insert.nxv8bf16.v8bf16(, <8 x bfloat>, i64) +declare @llvm.vector.insert.nxv2f64.v2f64(, <2 x double>, i64) +declare @llvm.vector.insert.nxv4f32.v4f32(, <4 x 
float>, i64) +declare @llvm.vector.insert.nxv8f16.v8f16(, <8 x half>, i64) +declare @llvm.vector.insert.nxv2i64.v2i64(, <2 x i64>, i64) +declare @llvm.vector.insert.nxv4i32.v4i32(, <4 x i32>, i64) +declare @llvm.vector.insert.nxv8i16.v8i16(, <8 x i16>, i64) +declare @llvm.vector.insert.nxv16i8.v16i8(, <16 x i8>, i64) +declare @llvm.vector.insert.nxv8bf16.v8bf16(, <8 x bfloat>, i64) ; +bf16 is required for the bfloat version. attributes #0 = { "target-features"="+sve,+bf16" } diff --git a/llvm/test/CodeGen/AArch64/sve-no-typesize-warnings.ll b/llvm/test/CodeGen/AArch64/sve-no-typesize-warnings.ll --- a/llvm/test/CodeGen/AArch64/sve-no-typesize-warnings.ll +++ b/llvm/test/CodeGen/AArch64/sve-no-typesize-warnings.ll @@ -8,13 +8,13 @@ ; CHECK: // %bb.0: ; CHECK-NEXT: uaddl v0.4s, v0.4h, v1.4h ; CHECK-NEXT: ret -%a.lo = call <4 x i16> @llvm.experimental.vector.extract.v4i16.nxv8i16( %a, i64 0) +%a.lo = call <4 x i16> @llvm.vector.extract.v4i16.nxv8i16( %a, i64 0) %a.lo.zext = zext <4 x i16> %a.lo to <4 x i32> %b.zext = zext <4 x i16> %b to <4 x i32> %add = add <4 x i32> %a.lo.zext, %b.zext ret <4 x i32> %add } -declare <4 x i16> @llvm.experimental.vector.extract.v4i16.nxv8i16(, i64) +declare <4 x i16> @llvm.vector.extract.v4i16.nxv8i16(, i64) attributes #0 = { "target-features"="+sve" } diff --git a/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll b/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll --- a/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll +++ b/llvm/test/CodeGen/AArch64/sve-punpklo-combine.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: ret %p0 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 10) %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv8i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv8i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv8i16( %p1, %ext1, zeroinitializer) @@ -30,7 +30,7 @@ ; CHECK-NEXT: ret %p0 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 11) %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv8i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv8i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv8i16( %p1, %ext1, zeroinitializer) @@ -47,7 +47,7 @@ ; CHECK-NEXT: and p0.b, p0/z, p0.b, p1.b ; CHECK-NEXT: ret %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv8i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv8i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv8i16( %p1, %ext1, zeroinitializer) @@ -64,7 +64,7 @@ ; CHECK-NEXT: ret %p0 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 10) %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv4i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv4i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv4i32( %p1, %ext1, zeroinitializer) @@ -84,7 +84,7 @@ ; CHECK-NEXT: ret %p0 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 11) %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call 
@llvm.experimental.vector.extract.nxv4i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv4i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv4i32( %p1, %ext1, zeroinitializer) @@ -102,7 +102,7 @@ ; CHECK-NEXT: and p0.b, p0/z, p0.b, p1.b ; CHECK-NEXT: ret %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv4i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv4i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv4i32( %p1, %ext1, zeroinitializer) @@ -120,7 +120,7 @@ ; CHECK-NEXT: ret %p0 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 10) %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv2i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv2i64( %p1, %ext1, zeroinitializer) @@ -141,7 +141,7 @@ ; CHECK-NEXT: ret %p0 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 11) %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv2i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv2i64( %p1, %ext1, zeroinitializer) @@ -160,7 +160,7 @@ ; CHECK-NEXT: and p0.b, p0/z, p0.b, p1.b ; CHECK-NEXT: ret %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv2i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv2i64( %p1, %ext1, zeroinitializer) @@ -179,7 +179,7 @@ ; CHECK-NEXT: ret %p0 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 11) %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv8i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv8i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv8i16( %p1, %ext1, zeroinitializer) @@ -199,7 +199,7 @@ ; CHECK-NEXT: ret %p0 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 11) %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv4i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv4i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 10) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv4i32( %p1, %ext1, zeroinitializer) @@ -218,7 +218,7 @@ ; CHECK-NEXT: ret %p0 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) %cmp = call @llvm.aarch64.sve.cmpeq.nxv16i8( %p0, %b, zeroinitializer) - %extract = call @llvm.experimental.vector.extract.nxv2i1.nxv16i1( %cmp, i64 0) + %extract = call @llvm.vector.extract.nxv2i1.nxv16i1( %cmp, i64 0) %ext1 = sext %extract to %p1 = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) %cmp1 = call @llvm.aarch64.sve.cmpne.nxv2i64( %p1, %ext1, zeroinitializer) @@ -232,9 +232,9 @@ declare @llvm.aarch64.sve.ptrue.nxv4i1(i32) declare 
@llvm.aarch64.sve.ptrue.nxv2i1(i32) -declare @llvm.experimental.vector.extract.nxv8i1.nxv16i1(, i64) -declare @llvm.experimental.vector.extract.nxv4i1.nxv16i1(, i64) -declare @llvm.experimental.vector.extract.nxv2i1.nxv16i1(, i64) +declare @llvm.vector.extract.nxv8i1.nxv16i1(, i64) +declare @llvm.vector.extract.nxv4i1.nxv16i1(, i64) +declare @llvm.vector.extract.nxv2i1.nxv16i1(, i64) declare @llvm.aarch64.sve.cmpne.nxv8i16(, , ) declare @llvm.aarch64.sve.cmpne.nxv4i32(, , ) diff --git a/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll b/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll --- a/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll +++ b/llvm/test/CodeGen/AArch64/sve-vecreduce-fold.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: ptest p0, p0.b ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: ret - %t = call @llvm.experimental.vector.insert.nxv16i1.nxv4i1( zeroinitializer, %in, i64 0) + %t = call @llvm.vector.insert.nxv16i1.nxv4i1( zeroinitializer, %in, i64 0) %res = call i1 @llvm.vector.reduce.or.nxv16i1( %t) ret i1 %res } @@ -22,7 +22,7 @@ ; CHECK-NEXT: ptest p0, p0.b ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: ret - %t = call @llvm.experimental.vector.insert.nxv16i1.nxv4i1( poison, %in, i64 0) + %t = call @llvm.vector.insert.nxv16i1.nxv4i1( poison, %in, i64 0) %res = call i1 @llvm.vector.reduce.or.nxv16i1( %t) ret i1 %res } @@ -38,7 +38,7 @@ ; CHECK-NEXT: ptest p0, p0.b ; CHECK-NEXT: cset w0, ne ; CHECK-NEXT: ret - %t = call @llvm.experimental.vector.insert.nxv16i1.nxv4i1( %vec, %in, i64 0) + %t = call @llvm.vector.insert.nxv16i1.nxv4i1( %vec, %in, i64 0) %res = call i1 @llvm.vector.reduce.or.nxv16i1( %t) ret i1 %res } @@ -57,7 +57,7 @@ ; CHECK-NEXT: ret %allones.ins = insertelement poison, i1 1, i32 0 %allones = shufflevector %allones.ins, poison, zeroinitializer - %t = call @llvm.experimental.vector.insert.nxv16i1.nxv4i1( %allones, %in, i64 0) + %t = call @llvm.vector.insert.nxv16i1.nxv4i1( %allones, %in, i64 0) %res = call i1 @llvm.vector.reduce.and.nxv16i1( %t) ret i1 %res } @@ -70,7 +70,7 @@ ; CHECK-NEXT: ptest p1, p0.b ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret - %t = call @llvm.experimental.vector.insert.nxv16i1.nxv4i1( poison, %in, i64 0) + %t = call @llvm.vector.insert.nxv16i1.nxv4i1( poison, %in, i64 0) %res = call i1 @llvm.vector.reduce.and.nxv16i1( %t) ret i1 %res } @@ -88,11 +88,11 @@ ; CHECK-NEXT: ptest p2, p0.b ; CHECK-NEXT: cset w0, eq ; CHECK-NEXT: ret - %t = call @llvm.experimental.vector.insert.nxv16i1.nxv4i1( %vec, %in, i64 0) + %t = call @llvm.vector.insert.nxv16i1.nxv4i1( %vec, %in, i64 0) %res = call i1 @llvm.vector.reduce.and.nxv16i1( %t) ret i1 %res } declare i1 @llvm.vector.reduce.and.nxv16i1() declare i1 @llvm.vector.reduce.or.nxv16i1() -declare @llvm.experimental.vector.insert.nxv16i1.nxv4i1(, , i64) +declare @llvm.vector.insert.nxv16i1.nxv4i1(, , i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll @@ -6,7 +6,7 @@ ; CHECK-LABEL: extract_nxv8i32_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( %vec, i64 0) + %c = call @llvm.vector.extract.nxv4i32.nxv8i32( %vec, i64 0) ret %c } @@ -15,7 +15,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( %vec, i64 4) + %c = call @llvm.vector.extract.nxv4i32.nxv8i32( %vec, i64 4) ret %c } @@ -23,7 +23,7 @@ ; CHECK-LABEL: 
extract_nxv8i32_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( %vec, i64 0) + %c = call @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 0) ret %c } @@ -32,7 +32,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( %vec, i64 2) + %c = call @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 2) ret %c } @@ -41,7 +41,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( %vec, i64 4) + %c = call @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 4) ret %c } @@ -50,7 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( %vec, i64 6) + %c = call @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 6) ret %c } @@ -58,7 +58,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv8i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( %vec, i64 0) + %c = call @llvm.vector.extract.nxv8i32.nxv16i32( %vec, i64 0) ret %c } @@ -67,7 +67,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv4r.v v8, v12 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( %vec, i64 8) + %c = call @llvm.vector.extract.nxv8i32.nxv16i32( %vec, i64 8) ret %c } @@ -75,7 +75,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv4i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %vec, i64 0) + %c = call @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 0) ret %c } @@ -84,7 +84,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v10 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %vec, i64 4) + %c = call @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 4) ret %c } @@ -93,7 +93,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %vec, i64 8) + %c = call @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 8) ret %c } @@ -102,7 +102,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v14 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %vec, i64 12) + %c = call @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 12) ret %c } @@ -110,7 +110,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv2i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 0) + %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 0) ret %c } @@ -119,7 +119,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 2) + %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 2) ret %c } @@ -128,7 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 4) + %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 4) ret %c } @@ -137,7 +137,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v11 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 6) + %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 6) ret %c } @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v12 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 8) + %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 8) ret %c } @@ -155,7 +155,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v13 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 10) + %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 10) ret %c } @@ -164,7 +164,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v14 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 12) + %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 12) ret %c } @@ -173,7 +173,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v15 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 14) + %c = call @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 14) ret %c } @@ -181,7 +181,7 @@ ; CHECK-LABEL: extract_nxv16i32_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 0) + %c = call @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 0) ret %c } @@ -193,7 +193,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 1) + %c = call @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 1) ret %c } @@ -205,7 +205,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v9, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 3) + %c = call @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 3) ret %c } @@ -217,7 +217,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v15, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 15) + %c = call @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 15) ret %c } @@ -226,7 +226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 2) + %c = call @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 2) ret %c } @@ -234,7 +234,7 @@ ; CHECK-LABEL: extract_nxv2i32_nxv1i32_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( %vec, i64 0) + %c = call @llvm.vector.extract.nxv1i32.nxv2i32( %vec, i64 0) ret %c } @@ -242,7 +242,7 @@ ; CHECK-LABEL: extract_nxv32i8_nxv2i8_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 0) + %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 0) ret %c } @@ -254,7 +254,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 2) + %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 2) ret %c } @@ -266,7 +266,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 4) + %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 4) ret %c } @@ -280,7 +280,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 6) + %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 6) ret %c } @@ -289,7 +289,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 8) + %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 8) ret %c } @@ -303,7 +303,7 @@ 
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v10, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 22) + %c = call @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 22) ret %c } @@ -316,7 +316,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( %vec, i64 7) + %c = call @llvm.vector.extract.nxv1i8.nxv8i8( %vec, i64 7) ret %c } @@ -330,7 +330,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( %vec, i64 3) + %c = call @llvm.vector.extract.nxv1i8.nxv4i8( %vec, i64 3) ret %c } @@ -338,7 +338,7 @@ ; CHECK-LABEL: extract_nxv2f16_nxv16f16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( %vec, i64 0) + %c = call @llvm.vector.extract.nxv2f16.nxv16f16( %vec, i64 0) ret %c } @@ -350,7 +350,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v8, v8, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( %vec, i64 2) + %c = call @llvm.vector.extract.nxv2f16.nxv16f16( %vec, i64 2) ret %c } @@ -359,7 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v9 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2f16.nxv16f16( %vec, i64 4) + %c = call @llvm.vector.extract.nxv2f16.nxv16f16( %vec, i64 4) ret %c } @@ -367,7 +367,7 @@ ; CHECK-LABEL: extract_nxv64i1_nxv8i1_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv8i1( %mask, i64 0) + %c = call @llvm.vector.extract.nxv8i1( %mask, i64 0) ret %c } @@ -379,7 +379,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu ; CHECK-NEXT: vslidedown.vx v0, v0, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv8i1( %mask, i64 8) + %c = call @llvm.vector.extract.nxv8i1( %mask, i64 8) ret %c } @@ -387,7 +387,7 @@ ; CHECK-LABEL: extract_nxv64i1_nxv2i1_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i1( %mask, i64 0) + %c = call @llvm.vector.extract.nxv2i1( %mask, i64 0) ret %c } @@ -404,7 +404,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv2i1( %mask, i64 2) + %c = call @llvm.vector.extract.nxv2i1( %mask, i64 2) ret %c } @@ -412,7 +412,7 @@ ; CHECK-LABEL: extract_nxv4i1_nxv32i1_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv4i1( %x, i64 0) + %c = call @llvm.vector.extract.nxv4i1( %x, i64 0) ret %c } @@ -429,7 +429,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv4i1( %x, i64 4) + %c = call @llvm.vector.extract.nxv4i1( %x, i64 4) ret %c } @@ -437,7 +437,7 @@ ; CHECK-LABEL: extract_nxv16i1_nxv32i1_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv16i1( %x, i64 0) + %c = call @llvm.vector.extract.nxv16i1( %x, i64 0) ret %c } @@ -449,7 +449,7 @@ ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu ; CHECK-NEXT: vslidedown.vx v0, v0, a0 ; CHECK-NEXT: ret - %c = call @llvm.experimental.vector.extract.nxv16i1( %x, i64 16) + %c = call @llvm.vector.extract.nxv16i1( %x, i64 16) ret %c } @@ -460,7 +460,7 @@ ; CHECK-LABEL: extract_nxv6f16_nxv12f16_0: ; CHECK: # %bb.0: ; 
CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv6f16.nxv12f16( %in, i64 0) + %res = call @llvm.vector.extract.nxv6f16.nxv12f16( %in, i64 0) ret %res } @@ -478,31 +478,31 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret - %res = call @llvm.experimental.vector.extract.nxv6f16.nxv12f16( %in, i64 6) + %res = call @llvm.vector.extract.nxv6f16.nxv12f16( %in, i64 6) ret %res } -declare @llvm.experimental.vector.extract.nxv6f16.nxv12f16(, i64) +declare @llvm.vector.extract.nxv6f16.nxv12f16(, i64) -declare @llvm.experimental.vector.extract.nxv1i8.nxv4i8( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv1i8.nxv8i8( %vec, i64 %idx) +declare @llvm.vector.extract.nxv1i8.nxv4i8( %vec, i64 %idx) +declare @llvm.vector.extract.nxv1i8.nxv8i8( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv2i8.nxv32i8( %vec, i64 %idx) +declare @llvm.vector.extract.nxv2i8.nxv32i8( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv1i32.nxv2i32( %vec, i64 %idx) +declare @llvm.vector.extract.nxv1i32.nxv2i32( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv2i32.nxv8i32( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv4i32.nxv8i32( %vec, i64 %idx) +declare @llvm.vector.extract.nxv2i32.nxv8i32( %vec, i64 %idx) +declare @llvm.vector.extract.nxv4i32.nxv8i32( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv1i32.nxv16i32( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv2i32.nxv16i32( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv4i32.nxv16i32( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv8i32.nxv16i32( %vec, i64 %idx) +declare @llvm.vector.extract.nxv1i32.nxv16i32( %vec, i64 %idx) +declare @llvm.vector.extract.nxv2i32.nxv16i32( %vec, i64 %idx) +declare @llvm.vector.extract.nxv4i32.nxv16i32( %vec, i64 %idx) +declare @llvm.vector.extract.nxv8i32.nxv16i32( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv2f16.nxv16f16( %vec, i64 %idx) +declare @llvm.vector.extract.nxv2f16.nxv16f16( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv4i1( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv16i1( %vec, i64 %idx) +declare @llvm.vector.extract.nxv4i1( %vec, i64 %idx) +declare @llvm.vector.extract.nxv16i1( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv2i1( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv8i1( %vec, i64 %idx) +declare @llvm.vector.extract.nxv2i1( %vec, i64 %idx) +declare @llvm.vector.extract.nxv8i1( %vec, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x - %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 0) + %c = call <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 0) store <2 x i8> %c, <2 x i8>* %y ret void } @@ -27,7 +27,7 @@ ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <4 x i8>, <4 x i8>* %x - %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 2) + %c = call <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %a, i64 2) store <2 x i8> %c, <2 x i8>* %y ret void } @@ -41,7 +41,7 @@ ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x 
i8>* %x - %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 0) + %c = call <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 0) store <2 x i8> %c, <2 x i8>* %y ret void } @@ -57,7 +57,7 @@ ; CHECK-NEXT: vse8.v v8, (a1) ; CHECK-NEXT: ret %a = load <8 x i8>, <8 x i8>* %x - %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 6) + %c = call <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %a, i64 6) store <2 x i8> %c, <2 x i8>* %y ret void } @@ -79,7 +79,7 @@ ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x - %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 0) + %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 0) store <2 x i32> %c, <2 x i32>* %y ret void } @@ -105,7 +105,7 @@ ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x - %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 2) + %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 2) store <2 x i32> %c, <2 x i32>* %y ret void } @@ -132,7 +132,7 @@ ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x - %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 6) + %c = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %a, i64 6) store <2 x i32> %c, <2 x i32>* %y ret void } @@ -143,7 +143,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.nxv16i32( %x, i64 0) + %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32( %x, i64 0) store <2 x i32> %c, <2 x i32>* %y ret void } @@ -156,7 +156,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i32> @llvm.experimental.vector.extract.v2i32.nxv16i32( %x, i64 6) + %c = call <2 x i32> @llvm.vector.extract.v2i32.nxv16i32( %x, i64 6) store <2 x i32> %c, <2 x i32>* %y ret void } @@ -167,7 +167,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( %x, i64 0) + %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( %x, i64 0) store <2 x i8> %c, <2 x i8>* %y ret void } @@ -180,7 +180,7 @@ ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu ; CHECK-NEXT: vse8.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( %x, i64 2) + %c = call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( %x, i64 2) store <2 x i8> %c, <2 x i8>* %y ret void } @@ -204,7 +204,7 @@ ; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: vse32.v v16, (a0) ; LMULMAX1-NEXT: ret - %c = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv16i32( %x, i64 8) + %c = call <8 x i32> @llvm.vector.extract.v8i32.nxv16i32( %x, i64 8) store <8 x i32> %c, <8 x i32>* %y ret void } @@ -227,7 +227,7 @@ ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x - %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0) + %c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 0) store <8 x i1> %c, <8 x i1>* %y ret void } @@ -254,7 +254,7 @@ ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x - %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8) + %c = call <8 x i1> 
@llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 8) store <8 x i1> %c, <8 x i1>* %y ret void } @@ -281,7 +281,7 @@ ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x - %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48) + %c = call <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %a, i64 48) store <8 x i1> %c, <8 x i1>* %y ret void } @@ -292,7 +292,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret - %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv2i1( %x, i64 0) + %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv2i1( %x, i64 0) store <8 x i1> %c, <8 x i1>* %y ret void } @@ -303,7 +303,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v0, (a0) ; CHECK-NEXT: ret - %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1( %x, i64 0) + %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1( %x, i64 0) store <8 x i1> %c, <8 x i1>* %y ret void } @@ -316,7 +316,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret - %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1( %x, i64 8) + %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1( %x, i64 8) store <8 x i1> %c, <8 x i1>* %y ret void } @@ -329,7 +329,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret - %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1( %x, i64 48) + %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv64i1( %x, i64 48) store <8 x i1> %c, <8 x i1>* %y ret void } @@ -369,7 +369,7 @@ ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x - %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0) + %c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 0) store <2 x i1> %c, <2 x i1>* %y ret void } @@ -418,7 +418,7 @@ ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x - %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2) + %c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 2) store <2 x i1> %c, <2 x i1>* %y ret void } @@ -469,7 +469,7 @@ ; LMULMAX1-NEXT: vsm.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <64 x i1>, <64 x i1>* %x - %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42) + %c = call <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %a, i64 42) store <2 x i1> %c, <2 x i1>* %y ret void } @@ -488,7 +488,7 @@ ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1( %x, i64 0) + %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1( %x, i64 0) store <2 x i1> %c, <2 x i1>* %y ret void } @@ -513,7 +513,7 @@ ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1( %x, i64 2) + %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv2i1( %x, i64 2) store <2 x i1> %c, <2 x i1>* %y ret void } @@ -532,7 +532,7 @@ ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1( %x, i64 0) + %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1( %x, i64 0) store <2 x i1> %c, <2 x i1>* %y ret void } @@ -557,7 +557,7 @@ ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i1> 
@llvm.experimental.vector.extract.v2i1.nxv64i1( %x, i64 2) + %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1( %x, i64 2) store <2 x i1> %c, <2 x i1>* %y ret void } @@ -583,7 +583,7 @@ ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1( %x, i64 42) + %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv64i1( %x, i64 42) store <2 x i1> %c, <2 x i1>* %y ret void } @@ -608,7 +608,7 @@ ; CHECK-NEXT: vmsne.vi v8, v9, 0 ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret - %c = call <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv32i1( %x, i64 26) + %c = call <2 x i1> @llvm.vector.extract.v2i1.nxv32i1( %x, i64 26) store <2 x i1> %c, <2 x i1>* %y ret void } @@ -621,28 +621,28 @@ ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu ; CHECK-NEXT: vsm.v v8, (a0) ; CHECK-NEXT: ret - %c = call <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv32i1( %x, i64 16) + %c = call <8 x i1> @llvm.vector.extract.v8i1.nxv32i1( %x, i64 16) store <8 x i1> %c, <8 x i1>* %y ret void } -declare <2 x i1> @llvm.experimental.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx) -declare <8 x i1> @llvm.experimental.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx) +declare <2 x i1> @llvm.vector.extract.v2i1.v64i1(<64 x i1> %vec, i64 %idx) +declare <8 x i1> @llvm.vector.extract.v8i1.v64i1(<64 x i1> %vec, i64 %idx) -declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv2i1( %vec, i64 %idx) -declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv2i1( %vec, i64 %idx) +declare <2 x i1> @llvm.vector.extract.v2i1.nxv2i1( %vec, i64 %idx) +declare <8 x i1> @llvm.vector.extract.v8i1.nxv2i1( %vec, i64 %idx) -declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv32i1( %vec, i64 %idx) -declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv32i1( %vec, i64 %idx) +declare <2 x i1> @llvm.vector.extract.v2i1.nxv32i1( %vec, i64 %idx) +declare <8 x i1> @llvm.vector.extract.v8i1.nxv32i1( %vec, i64 %idx) -declare <2 x i1> @llvm.experimental.vector.extract.v2i1.nxv64i1( %vec, i64 %idx) -declare <8 x i1> @llvm.experimental.vector.extract.v8i1.nxv64i1( %vec, i64 %idx) +declare <2 x i1> @llvm.vector.extract.v2i1.nxv64i1( %vec, i64 %idx) +declare <8 x i1> @llvm.vector.extract.v8i1.nxv64i1( %vec, i64 %idx) -declare <2 x i8> @llvm.experimental.vector.extract.v2i8.v4i8(<4 x i8> %vec, i64 %idx) -declare <2 x i8> @llvm.experimental.vector.extract.v2i8.v8i8(<8 x i8> %vec, i64 %idx) -declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 %idx) +declare <2 x i8> @llvm.vector.extract.v2i8.v4i8(<4 x i8> %vec, i64 %idx) +declare <2 x i8> @llvm.vector.extract.v2i8.v8i8(<8 x i8> %vec, i64 %idx) +declare <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 %idx) -declare <2 x i8> @llvm.experimental.vector.extract.v2i8.nxv2i8( %vec, i64 %idx) +declare <2 x i8> @llvm.vector.extract.v2i8.nxv2i8( %vec, i64 %idx) -declare <2 x i32> @llvm.experimental.vector.extract.v2i32.nxv16i32( %vec, i64 %idx) -declare <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv16i32( %vec, i64 %idx) +declare <2 x i32> @llvm.vector.extract.v2i32.nxv16i32( %vec, i64 %idx) +declare <8 x i32> @llvm.vector.extract.v8i32.nxv16i32( %vec, i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll @@ -18,7 +18,7 @@ ; CHECK-NEXT: 
vslideup.vi v8, v12, 0 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp - %v = call @llvm.experimental.vector.insert.v2i32.nxv8i32( %vec, <2 x i32> %sv, i64 0) + %v = call @llvm.vector.insert.v2i32.nxv8i32( %vec, <2 x i32> %sv, i64 0) ret %v } @@ -31,7 +31,7 @@ ; CHECK-NEXT: vslideup.vi v8, v12, 2 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp - %v = call @llvm.experimental.vector.insert.v2i32.nxv8i32( %vec, <2 x i32> %sv, i64 2) + %v = call @llvm.vector.insert.v2i32.nxv8i32( %vec, <2 x i32> %sv, i64 2) ret %v } @@ -44,7 +44,7 @@ ; CHECK-NEXT: vslideup.vi v8, v12, 6 ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp - %v = call @llvm.experimental.vector.insert.v2i32.nxv8i32( %vec, <2 x i32> %sv, i64 6) + %v = call @llvm.vector.insert.v2i32.nxv8i32( %vec, <2 x i32> %sv, i64 6) ret %v } @@ -69,7 +69,7 @@ ; LMULMAX1-NEXT: vslideup.vi v8, v16, 4 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp - %v = call @llvm.experimental.vector.insert.v8i32.nxv8i32( %vec, <8 x i32> %sv, i64 0) + %v = call @llvm.vector.insert.v8i32.nxv8i32( %vec, <8 x i32> %sv, i64 0) ret %v } @@ -94,7 +94,7 @@ ; LMULMAX1-NEXT: vslideup.vi v8, v16, 12 ; LMULMAX1-NEXT: ret %sv = load <8 x i32>, <8 x i32>* %svp - %v = call @llvm.experimental.vector.insert.v8i32.nxv8i32( %vec, <8 x i32> %sv, i64 8) + %v = call @llvm.vector.insert.v8i32.nxv8i32( %vec, <8 x i32> %sv, i64 8) ret %v } @@ -105,7 +105,7 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp - %v = call @llvm.experimental.vector.insert.v2i32.nxv8i32( undef, <2 x i32> %sv, i64 0) + %v = call @llvm.vector.insert.v2i32.nxv8i32( undef, <2 x i32> %sv, i64 0) ret %v } @@ -123,7 +123,7 @@ ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load <4 x i32>, <4 x i32>* %vp - %v = call <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 0) + %v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 0) store <4 x i32> %v, <4 x i32>* %vp ret void } @@ -141,7 +141,7 @@ ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load <4 x i32>, <4 x i32>* %vp - %v = call <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 2) + %v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> %vec, <2 x i32> %sv, i64 2) store <4 x i32> %v, <4 x i32>* %vp ret void } @@ -155,7 +155,7 @@ ; CHECK-NEXT: vse32.v v8, (a0) ; CHECK-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp - %v = call <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32> undef, <2 x i32> %sv, i64 0) + %v = call <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32> undef, <2 x i32> %sv, i64 0) store <4 x i32> %v, <4 x i32>* %vp ret void } @@ -186,7 +186,7 @@ ; LMULMAX1-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load <8 x i32>, <8 x i32>* %vp - %v = call <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 0) + %v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 0) store <8 x i32> %v, <8 x i32>* %vp ret void } @@ -216,7 +216,7 @@ ; LMULMAX1-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load <8 x i32>, <8 x i32>* %vp - %v = call <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 2) + %v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 2) store <8 x i32> %v, <8 x i32>* %vp ret void } @@ -246,7 +246,7 @@ ; LMULMAX1-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp %vec = load 
<8 x i32>, <8 x i32>* %vp - %v = call <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 6) + %v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> %vec, <2 x i32> %sv, i64 6) store <8 x i32> %v, <8 x i32>* %vp ret void } @@ -271,7 +271,7 @@ ; LMULMAX1-NEXT: vse32.v v9, (a0) ; LMULMAX1-NEXT: ret %sv = load <2 x i32>, <2 x i32>* %svp - %v = call <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32> undef, <2 x i32> %sv, i64 6) + %v = call <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32> undef, <2 x i32> %sv, i64 6) store <8 x i32> %v, <8 x i32>* %vp ret void } @@ -290,7 +290,7 @@ ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %vp %sv = load <2 x i16>, <2 x i16>* %svp - %c = call <4 x i16> @llvm.experimental.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 0) + %c = call <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 0) store <4 x i16> %c, <4 x i16>* %vp ret void } @@ -308,7 +308,7 @@ ; CHECK-NEXT: ret %v = load <4 x i16>, <4 x i16>* %vp %sv = load <2 x i16>, <2 x i16>* %svp - %c = call <4 x i16> @llvm.experimental.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 2) + %c = call <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16> %v, <2 x i16> %sv, i64 2) store <4 x i16> %c, <4 x i16>* %vp ret void } @@ -340,7 +340,7 @@ ; LMULMAX1-NEXT: ret %v = load <32 x i1>, <32 x i1>* %vp %sv = load <8 x i1>, <8 x i1>* %svp - %c = call <32 x i1> @llvm.experimental.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 0) + %c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 0) store <32 x i1> %c, <32 x i1>* %vp ret void } @@ -373,7 +373,7 @@ ; LMULMAX1-NEXT: ret %v = load <32 x i1>, <32 x i1>* %vp %sv = load <8 x i1>, <8 x i1>* %svp - %c = call <32 x i1> @llvm.experimental.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 16) + %c = call <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1> %v, <8 x i1> %sv, i64 16) store <32 x i1> %c, <32 x i1>* %vp ret void } @@ -400,7 +400,7 @@ ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %vp %sv = load <4 x i1>, <4 x i1>* %svp - %c = call <8 x i1> @llvm.experimental.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 0) + %c = call <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 0) store <8 x i1> %c, <8 x i1>* %vp ret void } @@ -427,7 +427,7 @@ ; CHECK-NEXT: ret %v = load <8 x i1>, <8 x i1>* %vp %sv = load <4 x i1>, <4 x i1>* %svp - %c = call <8 x i1> @llvm.experimental.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 4) + %c = call <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1> %v, <4 x i1> %sv, i64 4) store <8 x i1> %c, <8 x i1>* %vp ret void } @@ -441,7 +441,7 @@ ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret %sv = load <2 x i16>, <2 x i16>* %svp - %c = call @llvm.experimental.vector.insert.v2i16.nxv2i16( %v, <2 x i16> %sv, i64 0) + %c = call @llvm.vector.insert.v2i16.nxv2i16( %v, <2 x i16> %sv, i64 0) ret %c } @@ -454,7 +454,7 @@ ; CHECK-NEXT: vslideup.vi v8, v9, 4 ; CHECK-NEXT: ret %sv = load <2 x i16>, <2 x i16>* %svp - %c = call @llvm.experimental.vector.insert.v2i16.nxv2i16( %v, <2 x i16> %sv, i64 4) + %c = call @llvm.vector.insert.v2i16.nxv2i16( %v, <2 x i16> %sv, i64 4) ret %c } @@ -476,7 +476,7 @@ ; CHECK-NEXT: vmsne.vi v0, v9, 0 ; CHECK-NEXT: ret %sv = load <4 x i1>, <4 x i1>* %svp - %c = call @llvm.experimental.vector.insert.v4i1.nxv2i1( %v, <4 x i1> %sv, i64 0) + %c = call @llvm.vector.insert.v4i1.nxv2i1( %v, <4 x i1> %sv, i64 0) ret %c } @@ -489,7 +489,7 @@ ; 
CHECK-NEXT: vslideup.vi v0, v8, 0 ; CHECK-NEXT: ret %sv = load <8 x i1>, <8 x i1>* %svp - %c = call @llvm.experimental.vector.insert.v8i1.nxv8i1( %v, <8 x i1> %sv, i64 0) + %c = call @llvm.vector.insert.v8i1.nxv8i1( %v, <8 x i1> %sv, i64 0) ret %c } @@ -502,11 +502,11 @@ ; CHECK-NEXT: vslideup.vi v0, v8, 2 ; CHECK-NEXT: ret %sv = load <8 x i1>, <8 x i1>* %svp - %c = call @llvm.experimental.vector.insert.v8i1.nxv8i1( %v, <8 x i1> %sv, i64 16) + %c = call @llvm.vector.insert.v8i1.nxv8i1( %v, <8 x i1> %sv, i64 16) ret %c } -declare @llvm.experimental.vector.insert.v2i64.nxv16i64(, <2 x i64>, i64) +declare @llvm.vector.insert.v2i64.nxv16i64(, <2 x i64>, i64) define void @insert_v2i64_nxv16i64(<2 x i64>* %psv0, <2 x i64>* %psv1, * %out) { ; CHECK-LABEL: insert_v2i64_nxv16i64: @@ -520,8 +520,8 @@ ; CHECK-NEXT: ret %sv0 = load <2 x i64>, <2 x i64>* %psv0 %sv1 = load <2 x i64>, <2 x i64>* %psv1 - %v0 = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv0, i64 0) - %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( %v0, <2 x i64> %sv1, i64 4) + %v0 = call @llvm.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv0, i64 0) + %v = call @llvm.vector.insert.v2i64.nxv16i64( %v0, <2 x i64> %sv1, i64 4) store %v, * %out ret void } @@ -534,7 +534,7 @@ ; CHECK-NEXT: vs8r.v v8, (a1) ; CHECK-NEXT: ret %sv = load <2 x i64>, <2 x i64>* %psv - %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 0) + %v = call @llvm.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 0) store %v, * %out ret void } @@ -549,7 +549,7 @@ ; CHECK-NEXT: vs8r.v v16, (a1) ; CHECK-NEXT: ret %sv = load <2 x i64>, <2 x i64>* %psv - %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 2) + %v = call @llvm.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 2) store %v, * %out ret void } @@ -585,24 +585,24 @@ ; CHECK-NEXT: addi sp, sp, 64 ; CHECK-NEXT: ret %sv = load <2 x i64>, <2 x i64>* %psv - %v = call @llvm.experimental.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 8) + %v = call @llvm.vector.insert.v2i64.nxv16i64( undef, <2 x i64> %sv, i64 8) store %v, * %out ret void } -declare <8 x i1> @llvm.experimental.vector.insert.v4i1.v8i1(<8 x i1>, <4 x i1>, i64) -declare <32 x i1> @llvm.experimental.vector.insert.v8i1.v32i1(<32 x i1>, <8 x i1>, i64) +declare <8 x i1> @llvm.vector.insert.v4i1.v8i1(<8 x i1>, <4 x i1>, i64) +declare <32 x i1> @llvm.vector.insert.v8i1.v32i1(<32 x i1>, <8 x i1>, i64) -declare <4 x i16> @llvm.experimental.vector.insert.v2i16.v4i16(<4 x i16>, <2 x i16>, i64) +declare <4 x i16> @llvm.vector.insert.v2i16.v4i16(<4 x i16>, <2 x i16>, i64) -declare <4 x i32> @llvm.experimental.vector.insert.v2i32.v4i32(<4 x i32>, <2 x i32>, i64) -declare <8 x i32> @llvm.experimental.vector.insert.v2i32.v8i32(<8 x i32>, <2 x i32>, i64) +declare <4 x i32> @llvm.vector.insert.v2i32.v4i32(<4 x i32>, <2 x i32>, i64) +declare <8 x i32> @llvm.vector.insert.v2i32.v8i32(<8 x i32>, <2 x i32>, i64) -declare @llvm.experimental.vector.insert.v4i1.nxv2i1(, <4 x i1>, i64) -declare @llvm.experimental.vector.insert.v8i1.nxv8i1(, <8 x i1>, i64) +declare @llvm.vector.insert.v4i1.nxv2i1(, <4 x i1>, i64) +declare @llvm.vector.insert.v8i1.nxv8i1(, <8 x i1>, i64) -declare @llvm.experimental.vector.insert.v2i16.nxv2i16(, <2 x i16>, i64) +declare @llvm.vector.insert.v2i16.nxv2i16(, <2 x i16>, i64) -declare @llvm.experimental.vector.insert.v2i32.nxv8i32(, <2 x i32>, i64) -declare @llvm.experimental.vector.insert.v4i32.nxv8i32(, <4 x i32>, i64) -declare 
@llvm.experimental.vector.insert.v8i32.nxv8i32(, <8 x i32>, i64) +declare @llvm.vector.insert.v2i32.nxv8i32(, <2 x i32>, i64) +declare @llvm.vector.insert.v4i32.nxv8i32(, <4 x i32>, i64) +declare @llvm.vector.insert.v8i32.nxv8i32(, <8 x i32>, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll --- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll @@ -7,7 +7,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v12 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv4i32.nxv8i32( %vec, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv4i32.nxv8i32( %vec, %subvec, i64 0) ret %v } @@ -16,7 +16,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v10, v12 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv4i32.nxv8i32( %vec, %subvec, i64 4) + %v = call @llvm.vector.insert.nxv4i32.nxv8i32( %vec, %subvec, i64 4) ret %v } @@ -25,7 +25,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v12 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 0) ret %v } @@ -34,7 +34,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v12 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 2) + %v = call @llvm.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 2) ret %v } @@ -43,7 +43,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v10, v12 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 4) + %v = call @llvm.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 4) ret %v } @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v11, v12 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 6) + %v = call @llvm.vector.insert.nxv2i32.nxv8i32( %vec, %subvec, i64 6) ret %v } @@ -64,7 +64,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v8, v9, 0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 0) ret %v } @@ -79,7 +79,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v8, v9, a1 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 3) + %v = call @llvm.vector.insert.nxv1i8.nxv4i8( %vec, %subvec, i64 3) ret %v } @@ -88,7 +88,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv4r.v v8, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv8i32.nxv16i32( %vec, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv8i32.nxv16i32( %vec, %subvec, i64 0) ret %v } @@ -97,7 +97,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv4r.v v12, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv8i32.nxv16i32( %vec, %subvec, i64 8) + %v = call @llvm.vector.insert.nxv8i32.nxv16i32( %vec, %subvec, i64 8) ret %v } @@ -106,7 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v8, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 0) ret %v } @@ -115,7 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v10, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 4) + %v = call @llvm.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 4) ret %v } @@ -124,7 +124,7 @@ ; CHECK: # 
%bb.0: ; CHECK-NEXT: vmv2r.v v12, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 8) + %v = call @llvm.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 8) ret %v } @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv2r.v v14, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 12) + %v = call @llvm.vector.insert.nxv4i32.nxv16i32( %vec, %subvec, i64 12) ret %v } @@ -142,7 +142,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v8, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 0) ret %v } @@ -151,7 +151,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v9, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 2) + %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 2) ret %v } @@ -160,7 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v10, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 4) + %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 4) ret %v } @@ -169,7 +169,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v11, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 6) + %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 6) ret %v } @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v12, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 8) + %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 8) ret %v } @@ -187,7 +187,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v13, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 10) + %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 10) ret %v } @@ -196,7 +196,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v14, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 12) + %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 12) ret %v } @@ -205,7 +205,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmv1r.v v15, v16 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 14) + %v = call @llvm.vector.insert.nxv2i32.nxv16i32( %vec, %subvec, i64 14) ret %v } @@ -217,7 +217,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 0) ret %v } @@ -230,7 +230,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 1) + %v = call @llvm.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 1) ret %v } @@ -242,7 +242,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vslideup.vi v11, v16, 0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 6) + %v = call @llvm.vector.insert.nxv1i32.nxv16i32( %vec, %subvec, i64 6) ret %v } @@ -254,7 +254,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v10, 0 ; CHECK-NEXT: ret - %v = call 
@llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 0) ret %v } @@ -267,7 +267,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 1) + %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 1) ret %v } @@ -281,7 +281,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 2) + %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 2) ret %v } @@ -296,7 +296,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 3) + %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 3) ret %v } @@ -309,7 +309,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v10, a1 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 7) + %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 7) ret %v } @@ -322,7 +322,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu ; CHECK-NEXT: vslideup.vx v9, v10, a1 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 15) + %v = call @llvm.vector.insert.nxv1i8.nxv16i8( %vec, %subvec, i64 15) ret %v } @@ -334,7 +334,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v8, v16, 0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 0) ret %v } @@ -347,7 +347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v8, v16, a0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 2) + %v = call @llvm.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 2) ret %v } @@ -360,7 +360,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v14, v16, a0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 26) + %v = call @llvm.vector.insert.nxv2f16.nxv32f16( %vec, %subvec, i64 26) ret %v } @@ -368,7 +368,7 @@ ; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_0: ; CHECK: # %bb.0: ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1f16.nxv32f16( undef, %subvec, i64 0) + %v = call @llvm.vector.insert.nxv1f16.nxv32f16( undef, %subvec, i64 0) ret %v } @@ -382,7 +382,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vx v14, v8, a0 ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv1f16.nxv32f16( undef, %subvec, i64 26) + %v = call @llvm.vector.insert.nxv1f16.nxv32f16( undef, %subvec, i64 26) ret %v } @@ -394,7 +394,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vi v0, v8, 0 ; CHECK-NEXT: ret - %vec = call @llvm.experimental.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 0) + %vec = call @llvm.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 0) ret %vec } @@ -407,7 +407,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu ; CHECK-NEXT: vslideup.vx v0, v8, a0 ; CHECK-NEXT: ret - %vec = call @llvm.experimental.vector.insert.nxv8i1.nxv32i1( %v, %sv, i64 8) + %vec = call @llvm.vector.insert.nxv8i1.nxv32i1( %v, %sv, 
i64 8) ret %vec } @@ -428,7 +428,7 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 ; CHECK-NEXT: ret - %vec = call @llvm.experimental.vector.insert.nxv1i1.nxv4i1( %v, %sv, i64 0) + %vec = call @llvm.vector.insert.nxv1i1.nxv4i1( %v, %sv, i64 0) ret %vec } @@ -451,11 +451,11 @@ ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu ; CHECK-NEXT: vmsne.vi v0, v9, 0 ; CHECK-NEXT: ret - %vec = call @llvm.experimental.vector.insert.nxv1i1.nxv4i1( %v, %sv, i64 2) + %vec = call @llvm.vector.insert.nxv1i1.nxv4i1( %v, %sv, i64 2) ret %vec } -declare @llvm.experimental.vector.insert.nxv8i64.nxv16i64(, , i64) +declare @llvm.vector.insert.nxv8i64.nxv16i64(, , i64) define void @insert_nxv8i64_nxv16i64( %sv0, %sv1, * %out) { ; CHECK-LABEL: insert_nxv8i64_nxv16i64: @@ -466,8 +466,8 @@ ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vs8r.v v16, (a0) ; CHECK-NEXT: ret - %v0 = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) - %v = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( %v0, %sv1, i64 8) + %v0 = call @llvm.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) + %v = call @llvm.vector.insert.nxv8i64.nxv16i64( %v0, %sv1, i64 8) store %v, * %out ret void } @@ -477,7 +477,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) + %v = call @llvm.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 0) store %v, * %out ret void } @@ -490,25 +490,25 @@ ; CHECK-NEXT: add a0, a0, a1 ; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret - %v = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 8) + %v = call @llvm.vector.insert.nxv8i64.nxv16i64( undef, %sv0, i64 8) store %v, * %out ret void } -declare @llvm.experimental.vector.insert.nxv1i1.nxv4i1(, , i64) -declare @llvm.experimental.vector.insert.nxv8i1.nxv32i1(, , i64) +declare @llvm.vector.insert.nxv1i1.nxv4i1(, , i64) +declare @llvm.vector.insert.nxv8i1.nxv32i1(, , i64) -declare @llvm.experimental.vector.insert.nxv1i8.nxv16i8(, , i64) +declare @llvm.vector.insert.nxv1i8.nxv16i8(, , i64) -declare @llvm.experimental.vector.insert.nxv1f16.nxv32f16(, , i64) -declare @llvm.experimental.vector.insert.nxv2f16.nxv32f16(, , i64) +declare @llvm.vector.insert.nxv1f16.nxv32f16(, , i64) +declare @llvm.vector.insert.nxv2f16.nxv32f16(, , i64) -declare @llvm.experimental.vector.insert.nxv1i8.nxv4i8(, , i64 %idx) +declare @llvm.vector.insert.nxv1i8.nxv4i8(, , i64 %idx) -declare @llvm.experimental.vector.insert.nxv2i32.nxv8i32(, , i64 %idx) -declare @llvm.experimental.vector.insert.nxv4i32.nxv8i32(, , i64 %idx) +declare @llvm.vector.insert.nxv2i32.nxv8i32(, , i64 %idx) +declare @llvm.vector.insert.nxv4i32.nxv8i32(, , i64 %idx) -declare @llvm.experimental.vector.insert.nxv1i32.nxv16i32(, , i64 %idx) -declare @llvm.experimental.vector.insert.nxv2i32.nxv16i32(, , i64 %idx) -declare @llvm.experimental.vector.insert.nxv4i32.nxv16i32(, , i64 %idx) -declare @llvm.experimental.vector.insert.nxv8i32.nxv16i32(, , i64 %idx) +declare @llvm.vector.insert.nxv1i32.nxv16i32(, , i64 %idx) +declare @llvm.vector.insert.nxv2i32.nxv16i32(, , i64 %idx) +declare @llvm.vector.insert.nxv4i32.nxv16i32(, , i64 %idx) +declare @llvm.vector.insert.nxv8i32.nxv16i32(, , i64 %idx) diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -1266,8 +1266,8 @@ declare 
@llvm.masked.gather.nxv16i64.nxv16p0f64(, i32, , ) -declare @llvm.experimental.vector.insert.nxv8i64.nxv16i64(, , i64 %idx) -declare @llvm.experimental.vector.insert.nxv8p0i64.nxv16p0i64(, , i64 %idx) +declare @llvm.vector.insert.nxv8i64.nxv16i64(, , i64 %idx) +declare @llvm.vector.insert.nxv8p0i64.nxv16p0i64(, , i64 %idx) define void @mgather_nxv16i64( %ptrs0, %ptrs1, %m, %passthru0, %passthru1, * %out) { ; RV32-LABEL: mgather_nxv16i64: @@ -1318,11 +1318,11 @@ ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret - %p0 = call @llvm.experimental.vector.insert.nxv8p0i64.nxv16p0i64( undef, %ptrs0, i64 0) - %p1 = call @llvm.experimental.vector.insert.nxv8p0i64.nxv16p0i64( %p0, %ptrs1, i64 8) + %p0 = call @llvm.vector.insert.nxv8p0i64.nxv16p0i64( undef, %ptrs0, i64 0) + %p1 = call @llvm.vector.insert.nxv8p0i64.nxv16p0i64( %p0, %ptrs1, i64 8) - %pt0 = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( undef, %passthru0, i64 0) - %pt1 = call @llvm.experimental.vector.insert.nxv8i64.nxv16i64( %pt0, %passthru1, i64 8) + %pt0 = call @llvm.vector.insert.nxv8i64.nxv16i64( undef, %passthru0, i64 0) + %pt1 = call @llvm.vector.insert.nxv8i64.nxv16i64( %pt0, %passthru1, i64 8) %v = call @llvm.masked.gather.nxv16i64.nxv16p0f64( %p1, i32 8, %m, %pt1) store %v, * %out diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll @@ -1819,8 +1819,8 @@ declare void @llvm.masked.scatter.nxv16f64.nxv16p0f64(, , i32, ) -declare @llvm.experimental.vector.insert.nxv8f64.nxv16f64(, , i64) -declare @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64(, , i64) +declare @llvm.vector.insert.nxv8f64.nxv16f64(, , i64) +declare @llvm.vector.insert.nxv8p0f64.nxv16p0f64(, , i64) define void @mscatter_nxv16f64( %val0, %val1, %ptrs0, %ptrs1, %m) { ; RV32-LABEL: mscatter_nxv16f64: @@ -1863,10 +1863,10 @@ ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: ret - %p0 = call @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64( undef, %ptrs0, i64 0) - %p1 = call @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64( %p0, %ptrs1, i64 8) - %v0 = call @llvm.experimental.vector.insert.nxv8f64.nxv16f64( undef, %val0, i64 0) - %v1 = call @llvm.experimental.vector.insert.nxv8f64.nxv16f64( %v0, %val1, i64 8) + %p0 = call @llvm.vector.insert.nxv8p0f64.nxv16p0f64( undef, %ptrs0, i64 0) + %p1 = call @llvm.vector.insert.nxv8p0f64.nxv16p0f64( %p0, %ptrs1, i64 8) + %v0 = call @llvm.vector.insert.nxv8f64.nxv16f64( undef, %val0, i64 0) + %v1 = call @llvm.vector.insert.nxv8f64.nxv16f64( %v0, %val1, i64 8) call void @llvm.masked.scatter.nxv16f64.nxv16p0f64( %v1, %p1, i32 8, %m) ret void } @@ -1905,8 +1905,8 @@ ; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs - %v0 = call @llvm.experimental.vector.insert.nxv8f64.nxv16f64( undef, %val0, i64 0) - %v1 = call @llvm.experimental.vector.insert.nxv8f64.nxv16f64( %v0, %val1, i64 8) + %v0 = call @llvm.vector.insert.nxv8f64.nxv16f64( undef, %val0, i64 0) + %v1 = call @llvm.vector.insert.nxv8f64.nxv16f64( %v0, %val1, i64 8) call void @llvm.masked.scatter.nxv16f64.nxv16p0f64( %v1, %ptrs, i32 8, %m) ret void } @@ -1945,8 +1945,8 @@ ; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs - %v0 = call @llvm.experimental.vector.insert.nxv8f64.nxv16f64( undef, %val0, i64 0) - %v1 = call 
@llvm.experimental.vector.insert.nxv8f64.nxv16f64( %v0, %val1, i64 8) + %v0 = call @llvm.vector.insert.nxv8f64.nxv16f64( undef, %val0, i64 0) + %v1 = call @llvm.vector.insert.nxv8f64.nxv16f64( %v0, %val1, i64 8) call void @llvm.masked.scatter.nxv16f64.nxv16p0f64( %v1, %ptrs, i32 8, %m) ret void } diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll @@ -482,8 +482,8 @@ declare @llvm.vp.load.nxv17f64.p0nxv17f64(*, , i32) -declare @llvm.experimental.vector.extract.nxv1f64( %vec, i64 %idx) -declare @llvm.experimental.vector.extract.nxv16f64( %vec, i64 %idx) +declare @llvm.vector.extract.nxv1f64( %vec, i64 %idx) +declare @llvm.vector.extract.nxv16f64( %vec, i64 %idx) ; Note: We can't return as that introduces a vector ; store can't yet be legalized through widening. In order to test purely the @@ -542,8 +542,8 @@ ; CHECK-NEXT: vs1r.v v24, (a1) ; CHECK-NEXT: ret %load = call @llvm.vp.load.nxv17f64.p0nxv17f64(* %ptr, %m, i32 %evl) - %lo = call @llvm.experimental.vector.extract.nxv16f64( %load, i64 0) - %hi = call @llvm.experimental.vector.extract.nxv1f64( %load, i64 16) + %lo = call @llvm.vector.extract.nxv16f64( %load, i64 0) + %hi = call @llvm.vector.extract.nxv1f64( %load, i64 16) store %hi, * %out ret %lo } diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-cmpne.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-cmpne.ll --- a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-cmpne.ll +++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-opts-cmpne.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: @dupq_b_0( ; CHECK: ret zeroinitializer %1 = tail call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, + %2 = tail call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv16i8( %2 , i64 0) @@ -23,7 +23,7 @@ ; CHECK-NEXT: %2 = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( %1) ; CHECK-NEXT: ret %2 %1 = tail call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, + %2 = tail call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv16i8( %2 , i64 0) @@ -38,7 +38,7 @@ ; CHECK-NEXT: %2 = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( %1) ; CHECK-NEXT: ret %2 %1 = tail call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, + %2 = tail call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv16i8( %2 , i64 0) @@ -53,7 +53,7 @@ ; CHECK-NEXT: %2 = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( %1) ; CHECK-NEXT: ret %2 %1 = tail call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, + %2 = tail call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv16i8( %2 , i64 0) @@ -67,7 +67,7 @@ ; CHECK: %1 = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) ; CHECK-NEXT: ret %1 %1 = tail call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, + %2 = tail call @llvm.vector.insert.nxv16i8.v16i8( undef, <16 x i8> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv16i8( %2 , i64 0) @@ -82,7 +82,7 @@ ; CHECK-LABEL: @dupq_h_0( ; CHECK: 
ret zeroinitializer %1 = tail call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, + %2 = tail call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv8i16( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -97,7 +97,7 @@ ; CHECK-NEXT: %3 = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %2) ; CHECK-NEXT: ret %3 %1 = tail call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, + %2 = tail call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv8i16( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -112,7 +112,7 @@ ; CHECK-NEXT: %3 = call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( %2) ; CHECK-NEXT: ret %3 %1 = tail call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, + %2 = tail call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv8i16( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -125,7 +125,7 @@ ; CHECK: %1 = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) ; CHECK-NEXT: ret %1 %1 = tail call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, + %2 = tail call @llvm.vector.insert.nxv8i16.v8i16( undef, <8 x i16> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv8i16( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -139,7 +139,7 @@ ; CHECK-LABEL: @dupq_w_0( ; CHECK: ret zeroinitializer %1 = tail call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, + %2 = tail call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv4i32( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -154,7 +154,7 @@ ; CHECK-NEXT: %3 = call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( %2) ; CHECK-NEXT: ret %3 %1 = tail call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, + %2 = tail call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv4i32( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -167,7 +167,7 @@ ; CHECK: %1 = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) ; CHECK-NEXT: ret %1 %1 = tail call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, + %2 = tail call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv4i32( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -181,7 +181,7 @@ ; CHECK-LABEL: @dupq_d_0( ; CHECK: ret zeroinitializer %1 = tail call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, + %2 = tail call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -194,7 +194,7 @@ ; CHECK: %1 = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) ; CHECK-NEXT: ret %1 %1 = tail call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, + %2 = tail call 
@llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -209,7 +209,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, + %2 = tail call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -222,7 +222,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, + %2 = tail call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv4i32( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -235,7 +235,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, + %2 = tail call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv4i32( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -248,7 +248,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, + %2 = tail call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv4i32( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -261,7 +261,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, + %2 = tail call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv4i32( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -276,7 +276,7 @@ %1 = tail call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) %2 = zext i1 %a to i32 %3 = insertelement <4 x i32> , i32 %2, i32 3 - %4 = tail call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> %3, i64 0) + %4 = tail call @llvm.vector.insert.nxv4i32.v4i32( undef, <4 x i32> %3, i64 0) %5 = tail call @llvm.aarch64.sve.dupq.lane.nxv4i32( %4 , i64 0) %6 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) %7 = tail call @llvm.aarch64.sve.cmpne.wide.nxv4i32( %1, %5, %6) @@ -288,7 +288,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, + %2 = tail call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 2) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -301,7 +301,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, + %2 = tail call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %2 , i64 1) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -314,7 +314,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( %x, + %2 = tail call @llvm.vector.insert.nxv2i64.v2i64( %x, <2 x i64> , i64 0) 
%3 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -327,7 +327,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, + %2 = tail call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 1) @@ -339,7 +339,7 @@ ; CHECK-LABEL: @dupq_neg11( ; CHECK: cmpne ; CHECK-NEXT: ret - %1 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, + %1 = tail call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) %2 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %1 , i64 0) %3 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -352,7 +352,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv2i1(i32 15) - %2 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, + %2 = tail call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.dup.x.nxv2i64(i64 0) @@ -365,7 +365,7 @@ ; CHECK: cmpne ; CHECK-NEXT: ret %1 = tail call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) - %2 = tail call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, + %2 = tail call @llvm.vector.insert.nxv2i64.v2i64( undef, <2 x i64> , i64 0) %3 = tail call @llvm.aarch64.sve.dupq.lane.nxv2i64( %2 , i64 0) %4 = tail call @llvm.aarch64.sve.cmpne.nxv2i64( %1, %3, %x) @@ -377,10 +377,10 @@ declare @llvm.aarch64.sve.ptrue.nxv4i1(i32) declare @llvm.aarch64.sve.ptrue.nxv2i1(i32) -declare @llvm.experimental.vector.insert.nxv16i8.v16i8(, <16 x i8>, i64) -declare @llvm.experimental.vector.insert.nxv8i16.v8i16(, <8 x i16>, i64) -declare @llvm.experimental.vector.insert.nxv4i32.v4i32(, <4 x i32>, i64) -declare @llvm.experimental.vector.insert.nxv2i64.v2i64(, <2 x i64>, i64) +declare @llvm.vector.insert.nxv16i8.v16i8(, <16 x i8>, i64) +declare @llvm.vector.insert.nxv8i16.v8i16(, <8 x i16>, i64) +declare @llvm.vector.insert.nxv4i32.v4i32(, <4 x i32>, i64) +declare @llvm.vector.insert.nxv2i64.v2i64(, <2 x i64>, i64) declare @llvm.aarch64.sve.dupq.lane.nxv16i8(, i64) declare @llvm.aarch64.sve.dupq.lane.nxv8i16(, i64) diff --git a/llvm/test/Transforms/InstCombine/canonicalize-vector-extract.ll b/llvm/test/Transforms/InstCombine/canonicalize-vector-extract.ll --- a/llvm/test/Transforms/InstCombine/canonicalize-vector-extract.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-vector-extract.ll @@ -1,15 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -passes=instcombine -S | FileCheck %s -; llvm.experimental.vector.extract canonicalizes to shufflevector in the fixed case. In the +; llvm.vector.extract canonicalizes to shufflevector in the fixed case. In the ; scalable case, we lower to the EXTRACT_SUBVECTOR ISD node. 
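; Illustrative sketch (editor-added, not part of this patch; names and masks
; chosen for illustration): for fixed-width vectors the renamed intrinsic is
; folded away by instcombine, so a call such as
;   %lo = call <2 x i32> @llvm.vector.extract.v2i32.v8i32(<8 x i32> %vec, i64 2)
; canonicalizes to
;   %lo = shufflevector <8 x i32> %vec, <8 x i32> poison, <2 x i32> <i32 2, i32 3>
; whereas extracts from scalable vectors are kept as intrinsic calls and only
; become EXTRACT_SUBVECTOR nodes during SelectionDAG lowering, as the tests
; below check.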
-declare <10 x i32> @llvm.experimental.vector.extract.v10i32.v8i32(<8 x i32> %vec, i64 %idx) -declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 %idx) -declare <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 %idx) -declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( %vec, i64 %idx) -declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 %idx) -declare <8 x i32> @llvm.experimental.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 %idx) +declare <10 x i32> @llvm.vector.extract.v10i32.v8i32(<8 x i32> %vec, i64 %idx) +declare <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 %idx) +declare <3 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 %idx) +declare <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( %vec, i64 %idx) +declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 %idx) +declare <8 x i32> @llvm.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 %idx) ; ============================================================================ ; ; Trivial cases @@ -20,7 +20,7 @@ ; CHECK-LABEL: @trivial_nop( ; CHECK-NEXT: ret <8 x i32> [[VEC:%.*]] ; - %1 = call <8 x i32> @llvm.experimental.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 0) + %1 = call <8 x i32> @llvm.vector.extract.v8i32.v8i32(<8 x i32> %vec, i64 0) ret <8 x i32> %1 } @@ -33,7 +33,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[TMP1]] ; - %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 0) + %1 = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 0) ret <2 x i32> %1 } @@ -42,7 +42,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[TMP1]] ; - %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 2) + %1 = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 2) ret <2 x i32> %1 } @@ -51,7 +51,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[TMP1]] ; - %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 4) + %1 = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 4) ret <2 x i32> %1 } @@ -60,7 +60,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[TMP1]] ; - %1 = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 6) + %1 = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<8 x i32> %vec, i64 6) ret <2 x i32> %1 } @@ -69,7 +69,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; - %1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 0) + %1 = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 0) ret <4 x i32> %1 } @@ -78,7 +78,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; - %1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 4) + %1 = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 4) ret <4 x i32> %1 } @@ -87,7 +87,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <3 x i32> ; CHECK-NEXT: ret <3 x i32> 
[[TMP1]] ; - %1 = call <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 0) + %1 = call <3 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 0) ret <3 x i32> %1 } @@ -96,7 +96,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> poison, <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[TMP1]] ; - %1 = call <3 x i32> @llvm.experimental.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 3) + %1 = call <3 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> %vec, i64 3) ret <3 x i32> %1 } @@ -108,9 +108,9 @@ ; EXTRACT_SUBVECTOR ISD node later. define <4 x i32> @scalable_extract( %vec) { ; CHECK-LABEL: @scalable_extract( -; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( [[VEC:%.*]], i64 0) +; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( [[VEC:%.*]], i64 0) ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; - %1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32( %vec, i64 0) + %1 = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32( %vec, i64 0) ret <4 x i32> %1 } diff --git a/llvm/test/Transforms/InstCombine/canonicalize-vector-insert.ll b/llvm/test/Transforms/InstCombine/canonicalize-vector-insert.ll --- a/llvm/test/Transforms/InstCombine/canonicalize-vector-insert.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-vector-insert.ll @@ -1,14 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -passes=instcombine -S | FileCheck %s -; llvm.experimental.vector.insert canonicalizes to shufflevector in the fixed case. In the +; llvm.vector.insert canonicalizes to shufflevector in the fixed case. In the ; scalable case, we lower to the INSERT_SUBVECTOR ISD node. -declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 %idx) -declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 %idx) -declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 %idx) -declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v8i32(<8 x i32> %vec, <8 x i32> %subvec, i64 %idx) -declare @llvm.experimental.vector.insert.nxv4i32.v4i32( %vec, <4 x i32> %subvec, i64 %idx) +declare <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 %idx) +declare <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 %idx) +declare <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 %idx) +declare <8 x i32> @llvm.vector.insert.v8i32.v8i32(<8 x i32> %vec, <8 x i32> %subvec, i64 %idx) +declare @llvm.vector.insert.nxv4i32.v4i32( %vec, <4 x i32> %subvec, i64 %idx) ; ============================================================================ ; ; Trivial cases @@ -20,7 +20,7 @@ ; CHECK-LABEL: @trivial_nop( ; CHECK-NEXT: ret <8 x i32> [[SUBVEC:%.*]] ; - %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v8i32(<8 x i32> %vec, <8 x i32> %subvec, i64 0) + %1 = call <8 x i32> @llvm.vector.insert.v8i32.v8i32(<8 x i32> %vec, <8 x i32> %subvec, i64 0) ret <8 x i32> %1 } @@ -34,7 +34,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[VEC:%.*]], <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[TMP2]] ; - %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 0) + %1 = call <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 0) ret <8 x i32> %1 } @@ -44,7 +44,7 @@ 
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[TMP2]] ; - %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 2) + %1 = call <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 2) ret <8 x i32> %1 } @@ -54,7 +54,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[TMP2]] ; - %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 4) + %1 = call <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 4) ret <8 x i32> %1 } @@ -64,7 +64,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[TMP2]] ; - %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 6) + %1 = call <8 x i32> @llvm.vector.insert.v8i32.v2i32(<8 x i32> %vec, <2 x i32> %subvec, i64 6) ret <8 x i32> %1 } @@ -74,7 +74,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[VEC:%.*]], <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[TMP2]] ; - %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 0) + %1 = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 0) ret <8 x i32> %1 } @@ -84,7 +84,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[TMP2]] ; - %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 4) + %1 = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 4) ret <8 x i32> %1 } @@ -94,7 +94,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> [[VEC:%.*]], <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[TMP2]] ; - %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 0) + %1 = call <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 0) ret <8 x i32> %1 } @@ -104,7 +104,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i32> [[VEC:%.*]], <8 x i32> [[TMP1]], <8 x i32> ; CHECK-NEXT: ret <8 x i32> [[TMP2]] ; - %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 3) + %1 = call <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 3) ret <8 x i32> %1 } @@ -116,9 +116,9 @@ ; INSERT_SUBVECTOR ISD node later. 
define @scalable_insert( %vec, <4 x i32> %subvec) { ; CHECK-LABEL: @scalable_insert( -; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( [[VEC:%.*]], <4 x i32> [[SUBVEC:%.*]], i64 0) +; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.vector.insert.nxv4i32.v4i32( [[VEC:%.*]], <4 x i32> [[SUBVEC:%.*]], i64 0) ; CHECK-NEXT: ret [[TMP1]] ; - %1 = call @llvm.experimental.vector.insert.nxv4i32.v4i32( %vec, <4 x i32> %subvec, i64 0) + %1 = call @llvm.vector.insert.nxv4i32.v4i32( %vec, <4 x i32> %subvec, i64 0) ret %1 } diff --git a/llvm/test/Transforms/InstSimplify/extract-vector.ll b/llvm/test/Transforms/InstSimplify/extract-vector.ll --- a/llvm/test/Transforms/InstSimplify/extract-vector.ll +++ b/llvm/test/Transforms/InstSimplify/extract-vector.ll @@ -5,22 +5,22 @@ ; CHECK-LABEL: @redundant_insert_extract_chain( ; CHECK-NEXT: ret <16 x i8> [[X:%.*]] ; - %inserted = call @llvm.experimental.vector.insert.nxv32i8.v16i8( undef, <16 x i8> %x, i64 0) - %extracted = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8( %inserted, i64 0) + %inserted = call @llvm.vector.insert.nxv32i8.v16i8( undef, <16 x i8> %x, i64 0) + %extracted = call <16 x i8> @llvm.vector.extract.v16i8.nxv32i8( %inserted, i64 0) ret <16 x i8> %extracted } define <8 x i8> @non_redundant_insert_extract_chain(<16 x i8> %x) { ; CHECK-LABEL: @non_redundant_insert_extract_chain( -; CHECK-NEXT: [[INSERTED:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.v16i8( undef, <16 x i8> [[X:%.*]], i64 0) -; CHECK-NEXT: [[EXTRACTED:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8( [[INSERTED]], i64 0) +; CHECK-NEXT: [[INSERTED:%.*]] = call @llvm.vector.insert.nxv32i8.v16i8( undef, <16 x i8> [[X:%.*]], i64 0) +; CHECK-NEXT: [[EXTRACTED:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv32i8( [[INSERTED]], i64 0) ; CHECK-NEXT: ret <8 x i8> [[EXTRACTED]] ; - %inserted = call @llvm.experimental.vector.insert.nxv32i8.v16i8( undef, <16 x i8> %x, i64 0) - %extracted = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8( %inserted, i64 0) + %inserted = call @llvm.vector.insert.nxv32i8.v16i8( undef, <16 x i8> %x, i64 0) + %extracted = call <8 x i8> @llvm.vector.extract.v8i8.nxv32i8( %inserted, i64 0) ret <8 x i8> %extracted } -declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(, i64) -declare <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(, i64) -declare @llvm.experimental.vector.insert.nxv32i8.v16i8(, <16 x i8>, i64) +declare <16 x i8> @llvm.vector.extract.v16i8.nxv32i8(, i64) +declare <8 x i8> @llvm.vector.extract.v8i8.nxv32i8(, i64) +declare @llvm.vector.insert.nxv32i8.v16i8(, <16 x i8>, i64) diff --git a/llvm/test/Transforms/InstSimplify/insert-vector.ll b/llvm/test/Transforms/InstSimplify/insert-vector.ll --- a/llvm/test/Transforms/InstSimplify/insert-vector.ll +++ b/llvm/test/Transforms/InstSimplify/insert-vector.ll @@ -5,33 +5,33 @@ ; CHECK-LABEL: @redundant_extract_insert_chain( ; CHECK-NEXT: ret [[X:%.*]] ; - %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8( %x, i64 0) - %inserted = call @llvm.experimental.vector.insert.nxv16i8.v32i8( undef, <32 x i8> %extracted, i64 0) + %extracted = call <32 x i8> @llvm.vector.extract.v32i8.nxv16i8( %x, i64 0) + %inserted = call @llvm.vector.insert.nxv16i8.v32i8( undef, <32 x i8> %extracted, i64 0) ret %inserted } define @non_redundant_extract_insert_chain_0( %x) { ; CHECK-LABEL: @non_redundant_extract_insert_chain_0( -; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> 
@llvm.experimental.vector.extract.v32i8.nxv32i8( [[X:%.*]], i64 0) -; CHECK-NEXT: [[INSERTED:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v32i8( undef, <32 x i8> [[EXTRACTED]], i64 0) +; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv32i8( [[X:%.*]], i64 0) +; CHECK-NEXT: [[INSERTED:%.*]] = call @llvm.vector.insert.nxv16i8.v32i8( undef, <32 x i8> [[EXTRACTED]], i64 0) ; CHECK-NEXT: ret [[INSERTED]] ; - %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8( %x, i64 0) - %inserted = call @llvm.experimental.vector.insert.nxv16i8.v32i8( undef, <32 x i8> %extracted, i64 0) + %extracted = call <32 x i8> @llvm.vector.extract.v32i8.nxv32i8( %x, i64 0) + %inserted = call @llvm.vector.insert.nxv16i8.v32i8( undef, <32 x i8> %extracted, i64 0) ret %inserted } define @non_redundant_extract_insert_chain_1( %x, %y) { ; CHECK-LABEL: @non_redundant_extract_insert_chain_1( -; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8( [[X:%.*]], i64 0) -; CHECK-NEXT: [[INSERTED:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v32i8( [[Y:%.*]], <32 x i8> [[EXTRACTED]], i64 0) +; CHECK-NEXT: [[EXTRACTED:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv16i8( [[X:%.*]], i64 0) +; CHECK-NEXT: [[INSERTED:%.*]] = call @llvm.vector.insert.nxv16i8.v32i8( [[Y:%.*]], <32 x i8> [[EXTRACTED]], i64 0) ; CHECK-NEXT: ret [[INSERTED]] ; - %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8( %x, i64 0) - %inserted = call @llvm.experimental.vector.insert.nxv16i8.v32i8( %y, <32 x i8> %extracted, i64 0) + %extracted = call <32 x i8> @llvm.vector.extract.v32i8.nxv16i8( %x, i64 0) + %inserted = call @llvm.vector.insert.nxv16i8.v32i8( %y, <32 x i8> %extracted, i64 0) ret %inserted } -declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(, i64) -declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(, i64) -declare @llvm.experimental.vector.insert.nxv16i8.v32i8(, <32 x i8>, i64) +declare <32 x i8> @llvm.vector.extract.v32i8.nxv16i8(, i64) +declare <32 x i8> @llvm.vector.extract.v32i8.nxv32i8(, i64) +declare @llvm.vector.insert.nxv16i8.v32i8(, <32 x i8>, i64) diff --git a/llvm/test/Transforms/InterleavedAccess/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/InterleavedAccess/AArch64/sve-interleaved-accesses.ll --- a/llvm/test/Transforms/InterleavedAccess/AArch64/sve-interleaved-accesses.ll +++ b/llvm/test/Transforms/InterleavedAccess/AArch64/sve-interleaved-accesses.ll @@ -8,9 +8,9 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <32 x i16>* %ptr to i16* ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv8i16( [[PTRUE]], i16* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[EXT1:%.*]] = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv8i16( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv8i16( [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[EXT2:%.*]] = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv8i16( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv8i16( [[TMP3]], i64 0) ; CHECK-NEXT: ret void %interleaved.vec = load <32 x i16>, <32 x i16>* %ptr, align 4 %v0 = shufflevector <32 x i16> %interleaved.vec, <32 x i16> poison, <16 x i32> * %ptr to i32* ; CHECK-NEXT: [[LDN:%.*]] = call { , , } @llvm.aarch64.sve.ld3.sret.nxv4i32( [[PTRUE]], i32* [[TMP1]]) ; CHECK-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[LDN]], 2 -; CHECK-NEXT: [[EXT1:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv4i32( [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[LDN]], 1 -; CHECK-NEXT: [[EXT2:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv4i32( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[LDN]], 0 -; CHECK-NEXT: [[EXT3:%.*]] = call <8 x i32> @llvm.experimental.vector.extract.v8i32.nxv4i32( [[TMP4]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv4i32( [[TMP4]], i64 0) ; CHECK-NEXT: ret void %interleaved.vec = load <24 x i32>, <24 x i32>* %ptr, align 4 %v0 = shufflevector <24 x i32> %interleaved.vec, <24 x i32> poison, <8 x i32> @@ -45,13 +45,13 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i64>* %ptr to i64* ; CHECK-NEXT: [[LDN:%.*]] = call { , , , } @llvm.aarch64.sve.ld4.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[LDN]], 3 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[LDN]], 2 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[LDN]], 1 -; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP4]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP4]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[LDN]], 0 -; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP5]], i64 0) +; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP5]], i64 0) ; CHECK-NEXT: ret void %interleaved.vec = load <16 x i64>, <16 x i64>* %ptr, align 4 %v0 = shufflevector <16 x i64> %interleaved.vec, <16 x i64> poison, <4 x i32> @@ -65,9 +65,9 @@ ; CHECK-LABEL: @store_factor2( ; CHECK-NEXT: [[PTRUE:%.*]] = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31) ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i16> %v0, <16 x i16> %v1, <16 x i32> -; CHECK-NEXT: [[INS1:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v16i16( undef, <16 x i16> [[TMP1]], i64 0) +; CHECK-NEXT: [[INS1:%.*]] = call @llvm.vector.insert.nxv8i16.v16i16( undef, <16 x i16> [[TMP1]], i64 0) ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i16> %v0, <16 x i16> %v1, <16 x i32> -; CHECK-NEXT: [[INS2:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v16i16( undef, <16 x i16> [[TMP2]], i64 0) +; CHECK-NEXT: [[INS2:%.*]] = call @llvm.vector.insert.nxv8i16.v16i16( undef, <16 x i16> [[TMP2]], i64 0) ; CHECK-NEXT: [[PTR:%.*]] = bitcast <32 x i16>* %ptr to i16* ; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv8i16( [[INS1]], [[INS2]], [[PTRUE]], i16* [[PTR]]) ; CHECK-NEXT: ret void @@ -81,11 +81,11 @@ ; CHECK-LABEL: @store_factor3( ; CHECK: [[PTRUE:%.*]] = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31) ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <8 x i32> -; CHECK-NEXT: 
[[INS1:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v8i32( undef, <8 x i32> [[TMP1]], i64 0) +; CHECK-NEXT: [[INS1:%.*]] = call @llvm.vector.insert.nxv4i32.v8i32( undef, <8 x i32> [[TMP1]], i64 0) ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <8 x i32> -; CHECK-NEXT: [[INS2:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v8i32( undef, <8 x i32> [[TMP2]], i64 0) +; CHECK-NEXT: [[INS2:%.*]] = call @llvm.vector.insert.nxv4i32.v8i32( undef, <8 x i32> [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i32> %s0, <16 x i32> %s1, <8 x i32> -; CHECK-NEXT: [[INS3:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v8i32( undef, <8 x i32> [[TMP3]], i64 0) +; CHECK-NEXT: [[INS3:%.*]] = call @llvm.vector.insert.nxv4i32.v8i32( undef, <8 x i32> [[TMP3]], i64 0) ; CHECK-NEXT: [[PTR:%.*]] = bitcast <24 x i32>* %ptr to i32* ; CHECK-NEXT: call void @llvm.aarch64.sve.st3.nxv4i32( [[INS1]], [[INS2]], [[INS3]], [[PTRUE]], i32* [[PTR]]) ; CHECK-NEXT: ret void @@ -103,13 +103,13 @@ ; CHECK-LABEL: @store_factor4( ; CHECK: [[PTRUE:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> %s0, <8 x i64> %s1, <4 x i32> -; CHECK-NEXT: [[INS1:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP1]], i64 0) +; CHECK-NEXT: [[INS1:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP1]], i64 0) ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i64> %s0, <8 x i64> %s1, <4 x i32> -; CHECK-NEXT: [[INS2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP2]], i64 0) +; CHECK-NEXT: [[INS2:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> %s0, <8 x i64> %s1, <4 x i32> -; CHECK-NEXT: [[INS3:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP3]], i64 0) +; CHECK-NEXT: [[INS3:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> %s0, <8 x i64> %s1, <4 x i32> -; CHECK-NEXT: [[INS4:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP4]], i64 0) +; CHECK-NEXT: [[INS4:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP4]], i64 0) ; CHECK-NEXT: [[PTR:%.*]] = bitcast <16 x i64>* %ptr to i64* ; CHECK-NEXT: call void @llvm.aarch64.sve.st4.nxv2i64( [[INS1]], [[INS2]], [[INS3]], [[INS4]], [[PTRUE]], i64* [[PTR]]) ; CHECK-NEXT: ret void @@ -126,10 +126,10 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i32*>* %ptr to i64* ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TOP1:%.*]] = inttoptr <4 x i64> [[EXT1]] to <4 x i32*> ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TOP2:%.*]] = inttoptr <4 x i64> [[EXT2]] to <4 x i32*> ; CHECK-NEXT: ret void %interleaved.vec = load <8 x i32*>, <8 x i32*>* %ptr, align 4 @@ -144,13 +144,13 @@ ; CHECK-NEXT: [[TMP1:%.*]] = 
bitcast <12 x i32*>* %ptr to i64* ; CHECK-NEXT: [[LDN:%.*]] = call { , , } @llvm.aarch64.sve.ld3.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[LDN]], 2 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TOP1:%.*]] = inttoptr <4 x i64> [[EXT1]] to <4 x i32*> ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[LDN]], 1 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TOP2:%.*]] = inttoptr <4 x i64> [[EXT2]] to <4 x i32*> ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[LDN]], 0 -; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP4]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP4]], i64 0) ; CHECK-NEXT: [[TOP3:%.*]] = inttoptr <4 x i64> [[EXT3]] to <4 x i32*> ; CHECK-NEXT: ret void %interleaved.vec = load <12 x i32*>, <12 x i32*>* %ptr, align 4 @@ -166,16 +166,16 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32*>* %ptr to i64* ; CHECK-NEXT: [[LDN:%.*]] = call { , , , } @llvm.aarch64.sve.ld4.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[LDN]], 3 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TOP1:%.*]] = inttoptr <4 x i64> [[EXT1]] to <4 x i32*> ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[LDN]], 2 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TOP2:%.*]] = inttoptr <4 x i64> [[EXT2]] to <4 x i32*> ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[LDN]], 1 -; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP4]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP4]], i64 0) ; CHECK-NEXT: [[TOP3:%.*]] = inttoptr <4 x i64> [[EXT3]] to <4 x i32*> ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[LDN]], 0 -; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP5]], i64 0) +; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP5]], i64 0) ; CHECK-NEXT: [[TOP4:%.*]] = inttoptr <4 x i64> [[EXT4]] to <4 x i32*> ; CHECK-NEXT: ret void %interleaved.vec = load <16 x i32*>, <16 x i32*>* %ptr, align 4 @@ -192,9 +192,9 @@ ; CHECK-NEXT: [[TOI2:%.*]] = ptrtoint <4 x i32*> %v1 to <4 x i64> ; CHECK-NEXT: [[PTRUE:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x i64> [[TOI1]], <4 x i64> [[TOI2]], <4 x i32> -; CHECK-NEXT: [[INS1:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP1]], i64 0) +; CHECK-NEXT: [[INS1:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP1]], i64 0) ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x i64> [[TOI1]], <4 x i64> [[TOI2]], <4 x i32> -; CHECK-NEXT: [[INS2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, 
<4 x i64> [[TMP2]], i64 0) +; CHECK-NEXT: [[INS2:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP2]], i64 0) ; CHECK-NEXT: [[PTR:%.*]] = bitcast <8 x i32*>* %ptr to i64* ; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv2i64( [[INS1]], [[INS2]], [[PTRUE]], i64* [[PTR]]) ; CHECK-NEXT: ret void @@ -209,11 +209,11 @@ ; CHECK-NEXT: [[TOI2:%.*]] = ptrtoint <8 x i32*> %s1 to <8 x i64> ; CHECK-NEXT: [[PTRUE:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> -; CHECK-NEXT: [[INS1:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP1]], i64 0) +; CHECK-NEXT: [[INS1:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP1]], i64 0) ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> -; CHECK-NEXT: [[INS2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP2]], i64 0) +; CHECK-NEXT: [[INS2:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> -; CHECK-NEXT: [[INS3:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP3]], i64 0) +; CHECK-NEXT: [[INS3:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP3]], i64 0) ; CHECK-NEXT: [[PTR:%.*]] = bitcast <12 x i32*>* %ptr to i64* ; CHECK-NEXT: call void @llvm.aarch64.sve.st3.nxv2i64( [[INS1]], [[INS2]], [[INS3]], [[PTRUE]], i64* [[PTR]]) ; CHECK-NEXT: ret void @@ -230,13 +230,13 @@ ; CHECK-NEXT: [[TOI2:%.*]] = ptrtoint <8 x i32*> %s1 to <8 x i64> ; CHECK-NEXT: [[PTRUE:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) ; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> -; CHECK-NEXT: [[INS1:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP1]], i64 0) +; CHECK-NEXT: [[INS1:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP1]], i64 0) ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> -; CHECK-NEXT: [[INS2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP2]], i64 0) +; CHECK-NEXT: [[INS2:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> -; CHECK-NEXT: [[INS3:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP3]], i64 0) +; CHECK-NEXT: [[INS3:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> [[TOI1]], <8 x i64> [[TOI2]], <4 x i32> -; CHECK-NEXT: [[INS4:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP4]], i64 0) +; CHECK-NEXT: [[INS4:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP4]], i64 0) ; CHECK-NEXT: [[PTR:%.*]] = bitcast <16 x i32*>* %ptr to i64* ; CHECK-NEXT: call void @llvm.aarch64.sve.st4.nxv2i64( [[INS1]], [[INS2]], [[INS3]], [[INS4]], [[PTRUE]], i64* [[PTR]]) ; CHECK-NEXT: ret void @@ -254,15 +254,15 @@ ; CHECK-NEXT: [[PTRUE:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv2i64( [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> 
@llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) +; CHECK-NEXT: [[EXT1:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) +; CHECK-NEXT: [[EXT2:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[TMP1]], i32 8 ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv2i64( [[PTRUE]], i64* [[TMP4]]) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP5]], i64 0) +; CHECK-NEXT: [[EXT3:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP5]], i64 0) ; CHECK-NEXT: [[TMP6:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64( [[TMP6]], i64 0) +; CHECK-NEXT: [[EXT4:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv2i64( [[TMP6]], i64 0) ; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i64> [[EXT1]], <4 x i64> [[EXT3]], <8 x i32> ; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i64> [[EXT2]], <4 x i64> [[EXT4]], <8 x i32> ; CHECK-NEXT: ret void @@ -277,14 +277,14 @@ ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i64>* %ptr to i64* ; CHECK-NEXT: [[PTRUE:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i64> %v0, <8 x i64> %v1, <4 x i32> -; CHECK-NEXT: [[INS1:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP2]], i64 0) +; CHECK-NEXT: [[INS1:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP2]], i64 0) ; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <8 x i64> %v0, <8 x i64> %v1, <4 x i32> -; CHECK-NEXT: [[INS2:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP3]], i64 0) +; CHECK-NEXT: [[INS2:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP3]], i64 0) ; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv2i64( [[INS1]], [[INS2]], [[PTRUE]], i64* [[TMP1]]) ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x i64> %v0, <8 x i64> %v1, <4 x i32> -; CHECK-NEXT: [[INS3:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP4]], i64 0) +; CHECK-NEXT: [[INS3:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP4]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <8 x i64> %v0, <8 x i64> %v1, <4 x i32> -; CHECK-NEXT: [[INS4:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP5]], i64 0) +; CHECK-NEXT: [[INS4:%.*]] = call @llvm.vector.insert.nxv2i64.v4i64( undef, <4 x i64> [[TMP5]], i64 0) ; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, i64* [[TMP1]], i32 8 ; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv2i64( [[INS3]], [[INS4]], [[PTRUE]], i64* [[TMP6]]) ; CHECK-NEXT: ret void @@ -357,13 +357,13 @@ ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x double>* [[PTR:%.*]] to double* ; CHECK-NEXT: [[LDN:%.*]] = call { , , , } @llvm.aarch64.sve.ld4.sret.nxv2f64( [[TMP1]], double* [[TMP2]]) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[LDN]], 3 -; CHECK-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[TMP3]], i64 0) +; CHECK-NEXT: [[TMP4:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } 
[[LDN]], 2 -; CHECK-NEXT: [[TMP6:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[TMP5]], i64 0) +; CHECK-NEXT: [[TMP6:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64( [[TMP5]], i64 0) ; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { , , , } [[LDN]], 1 -; CHECK-NEXT: [[TMP8:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[TMP7]], i64 0) +; CHECK-NEXT: [[TMP8:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64( [[TMP7]], i64 0) ; CHECK-NEXT: [[TMP9:%.*]] = extractvalue { , , , } [[LDN]], 0 -; CHECK-NEXT: [[TMP10:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64( [[TMP9]], i64 0) +; CHECK-NEXT: [[TMP10:%.*]] = call <4 x double> @llvm.vector.extract.v4f64.nxv2f64( [[TMP9]], i64 0) ; CHECK-NEXT: ret void ; %interleaved.vec = load <16 x double>, <16 x double>* %ptr, align 4 @@ -380,11 +380,11 @@ ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <24 x float>* [[PTR:%.*]] to float* ; CHECK-NEXT: [[LDN:%.*]] = call { , , } @llvm.aarch64.sve.ld3.sret.nxv4f32( [[TMP1]], float* [[TMP2]]) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[LDN]], 2 -; CHECK-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32( [[TMP3]], i64 0) +; CHECK-NEXT: [[TMP4:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[LDN]], 1 -; CHECK-NEXT: [[TMP6:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32( [[TMP5]], i64 0) +; CHECK-NEXT: [[TMP6:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32( [[TMP5]], i64 0) ; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { , , } [[LDN]], 0 -; CHECK-NEXT: [[TMP8:%.*]] = call <8 x float> @llvm.experimental.vector.extract.v8f32.nxv4f32( [[TMP7]], i64 0) +; CHECK-NEXT: [[TMP8:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32( [[TMP7]], i64 0) ; CHECK-NEXT: ret void ; %interleaved.vec = load <24 x float>, <24 x float>* %ptr, align 4 @@ -400,9 +400,9 @@ ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <32 x half>* [[PTR:%.*]] to half* ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv8f16( [[TMP1]], half* [[TMP2]]) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[TMP4:%.*]] = call <16 x half> @llvm.experimental.vector.extract.v16f16.nxv8f16( [[TMP3]], i64 0) +; CHECK-NEXT: [[TMP4:%.*]] = call <16 x half> @llvm.vector.extract.v16f16.nxv8f16( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[TMP6:%.*]] = call <16 x half> @llvm.experimental.vector.extract.v16f16.nxv8f16( [[TMP5]], i64 0) +; CHECK-NEXT: [[TMP6:%.*]] = call <16 x half> @llvm.vector.extract.v16f16.nxv8f16( [[TMP5]], i64 0) ; CHECK-NEXT: ret void ; %interleaved.vec = load <32 x half>, <32 x half>* %ptr, align 4 @@ -417,9 +417,9 @@ ; CHECK-NEXT: [[TMP2:%.*]] = bitcast <32 x bfloat>* [[PTR:%.*]] to bfloat* ; CHECK-NEXT: [[LDN:%.*]] = call { , } @llvm.aarch64.sve.ld2.sret.nxv8bf16( [[TMP1]], bfloat* [[TMP2]]) ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { , } [[LDN]], 1 -; CHECK-NEXT: [[TMP4:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16( [[TMP3]], i64 0) +; CHECK-NEXT: [[TMP4:%.*]] = call <16 x bfloat> @llvm.vector.extract.v16bf16.nxv8bf16( [[TMP3]], i64 0) ; CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[LDN]], 0 -; CHECK-NEXT: [[TMP6:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16( [[TMP5]], i64 0) +; CHECK-NEXT: [[TMP6:%.*]] = call <16 x bfloat> 
@llvm.vector.extract.v16bf16.nxv8bf16( [[TMP5]], i64 0)
; CHECK-NEXT: ret void
;
%interleaved.vec = load <32 x bfloat>, <32 x bfloat>* %ptr, align 4
@@ -434,13 +434,13 @@
; CHECK-NEXT: [[S1:%.*]] = shufflevector <4 x double> [[V2:%.*]], <4 x double> [[V3:%.*]], <8 x i32>
; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP2]], i64 0)
+; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP2]], i64 0)
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
-; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP4]], i64 0)
+; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP4]], i64 0)
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
-; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP6]], i64 0)
+; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP6]], i64 0)
; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
-; CHECK-NEXT: [[TMP9:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP8]], i64 0)
+; CHECK-NEXT: [[TMP9:%.*]] = call @llvm.vector.insert.nxv2f64.v4f64( undef, <4 x double> [[TMP8]], i64 0)
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x double>* [[PTR:%.*]] to double*
; CHECK-NEXT: call void @llvm.aarch64.sve.st4.nxv2f64( [[TMP3]], [[TMP5]], [[TMP7]], [[TMP9]], [[TMP1]], double* [[TMP10]])
; CHECK-NEXT: ret void
@@ -458,11 +458,11 @@
; CHECK-NEXT: [[S1:%.*]] = shufflevector <8 x float> [[V2:%.*]], <8 x float> poison, <16 x i32>
; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x float> [[S0]], <16 x float> [[S1]], <8 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v8f32( undef, <8 x float> [[TMP2]], i64 0)
+; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.vector.insert.nxv4f32.v8f32( undef, <8 x float> [[TMP2]], i64 0)
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x float> [[S0]], <16 x float> [[S1]], <8 x i32>
-; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v8f32( undef, <8 x float> [[TMP4]], i64 0)
+; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.vector.insert.nxv4f32.v8f32( undef, <8 x float> [[TMP4]], i64 0)
; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <16 x float> [[S0]], <16 x float> [[S1]], <8 x i32>
-; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.v8f32( undef, <8 x float> [[TMP6]], i64 0)
+; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.vector.insert.nxv4f32.v8f32( undef, <8 x float> [[TMP6]], i64 0)
; CHECK-NEXT: [[TMP8:%.*]] = bitcast <24 x float>* [[PTR:%.*]] to float*
; CHECK-NEXT: call void @llvm.aarch64.sve.st3.nxv4f32( [[TMP3]], [[TMP5]], [[TMP7]], [[TMP1]], float* [[TMP8]])
; CHECK-NEXT: ret void
@@ -481,9 +481,9 @@
; CHECK-LABEL: @store_half_factor2(
; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x half> [[V0:%.*]], <16 x half> [[V1:%.*]], <16 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v16f16( undef, <16 x half> [[TMP2]], i64 0)
+; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.vector.insert.nxv8f16.v16f16( undef, <16 x half> [[TMP2]], i64 0)
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x half> [[V0]], <16 x half> [[V1]], <16 x i32>
-; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.v16f16( undef, <16 x half> [[TMP4]], i64 0)
+; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.vector.insert.nxv8f16.v16f16( undef, <16 x half> [[TMP4]], i64 0)
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <32 x half>* [[PTR:%.*]] to half*
; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv8f16( [[TMP3]], [[TMP5]], [[TMP1]], half* [[TMP6]])
; CHECK-NEXT: ret void
@@ -499,9 +499,9 @@
; CHECK-LABEL: @store_bfloat_factor2(
; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x bfloat> [[V0:%.*]], <16 x bfloat> [[V1:%.*]], <16 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v16bf16( undef, <16 x bfloat> [[TMP2]], i64 0)
+; CHECK-NEXT: [[TMP3:%.*]] = call @llvm.vector.insert.nxv8bf16.v16bf16( undef, <16 x bfloat> [[TMP2]], i64 0)
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <16 x bfloat> [[V0]], <16 x bfloat> [[V1]], <16 x i32>
-; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv8bf16.v16bf16( undef, <16 x bfloat> [[TMP4]], i64 0)
+; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.vector.insert.nxv8bf16.v16bf16( undef, <16 x bfloat> [[TMP4]], i64 0)
; CHECK-NEXT: [[TMP6:%.*]] = bitcast <32 x bfloat>* [[PTR:%.*]] to bfloat*
; CHECK-NEXT: call void @llvm.aarch64.sve.st2.nxv8bf16( [[TMP3]], [[TMP5]], [[TMP1]], bfloat* [[TMP6]])
; CHECK-NEXT: ret void
diff --git a/llvm/test/Verifier/extract-vector-mismatched-element-types.ll b/llvm/test/Verifier/extract-vector-mismatched-element-types.ll
--- a/llvm/test/Verifier/extract-vector-mismatched-element-types.ll
+++ b/llvm/test/Verifier/extract-vector-mismatched-element-types.ll
@@ -1,9 +1,9 @@
; RUN: not opt -verify -S < %s 2>&1 >/dev/null | FileCheck %s
-; CHECK: experimental_vector_extract result must have the same element type as the input vector.
+; CHECK: vector_extract result must have the same element type as the input vector.
define <16 x i16> @invalid_mismatched_element_types( %vec) nounwind {
- %retval = call <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv16i8( %vec, i64 0)
+ %retval = call <16 x i16> @llvm.vector.extract.v16i16.nxv16i8( %vec, i64 0)
ret <16 x i16> %retval
}
-declare <16 x i16> @llvm.experimental.vector.extract.v16i16.nxv16i8(, i64)
+declare <16 x i16> @llvm.vector.extract.v16i16.nxv16i8(, i64)
diff --git a/llvm/test/Verifier/insert-extract-intrinsics-invalid.ll b/llvm/test/Verifier/insert-extract-intrinsics-invalid.ll
--- a/llvm/test/Verifier/insert-extract-intrinsics-invalid.ll
+++ b/llvm/test/Verifier/insert-extract-intrinsics-invalid.ll
@@ -4,15 +4,15 @@
; Test that extractions/insertion indices are validated.
;
-; CHECK: experimental_vector_extract index must be a constant multiple of the result type's known minimum vector length.
+; CHECK: vector_extract index must be a constant multiple of the result type's known minimum vector length.
define <4 x i32> @extract_idx_not_constant_multiple(<8 x i32> %vec) {
- %1 = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 1)
+ %1 = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %vec, i64 1)
ret <4 x i32> %1
}
-; CHECK: experimental_vector_insert index must be a constant multiple of the subvector's known minimum vector length.
+; CHECK: vector_insert index must be a constant multiple of the subvector's known minimum vector length.
define <8 x i32> @insert_idx_not_constant_multiple(<8 x i32> %vec, <4 x i32> %subvec) {
- %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 2)
+ %1 = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %vec, <4 x i32> %subvec, i64 2)
ret <8 x i32> %1
}
@@ -20,53 +20,53 @@
; Test that extractions/insertions which 'overrun' are captured.
;
-; CHECK: experimental_vector_extract would overrun.
+; CHECK: vector_extract would overrun.
define <3 x i32> @extract_overrun_fixed_fixed(<8 x i32> %vec) {
- %1 = call <3 x i32> @llvm.experimental.vector.extract.v8i32.v3i32(<8 x i32> %vec, i64 6)
+ %1 = call <3 x i32> @llvm.vector.extract.v8i32.v3i32(<8 x i32> %vec, i64 6)
ret <3 x i32> %1
}
-; CHECK: experimental_vector_extract would overrun.
+; CHECK: vector_extract would overrun.
define @extract_overrun_scalable_scalable( %vec) {
- %1 = call @llvm.experimental.vector.extract.nxv8i32.nxv3i32( %vec, i64 6)
+ %1 = call @llvm.vector.extract.nxv8i32.nxv3i32( %vec, i64 6)
ret %1
}
; We cannot statically check whether or not an extraction of a fixed vector
; from a scalable vector would overrun, because we can't compare the sizes of
; the two. Therefore, this function should not raise verifier errors.
-; CHECK-NOT: experimental_vector_extract
+; CHECK-NOT: vector_extract
define <3 x i32> @extract_overrun_scalable_fixed( %vec) {
- %1 = call <3 x i32> @llvm.experimental.vector.extract.nxv8i32.v3i32( %vec, i64 6)
+ %1 = call <3 x i32> @llvm.vector.extract.nxv8i32.v3i32( %vec, i64 6)
ret <3 x i32> %1
}
-; CHECK: subvector operand of experimental_vector_insert would overrun the vector being inserted into.
+; CHECK: subvector operand of vector_insert would overrun the vector being inserted into.
define <8 x i32> @insert_overrun_fixed_fixed(<8 x i32> %vec, <3 x i32> %subvec) {
- %1 = call <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 6)
+ %1 = call <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32> %vec, <3 x i32> %subvec, i64 6)
ret <8 x i32> %1
}
-; CHECK: subvector operand of experimental_vector_insert would overrun the vector being inserted into.
+; CHECK: subvector operand of vector_insert would overrun the vector being inserted into.
define @insert_overrun_scalable_scalable( %vec, %subvec) {
- %1 = call @llvm.experimental.vector.insert.nxv8i32.nxv3i32( %vec, %subvec, i64 6)
+ %1 = call @llvm.vector.insert.nxv8i32.nxv3i32( %vec, %subvec, i64 6)
ret %1
}
; We cannot statically check whether or not an insertion of a fixed vector into
; a scalable vector would overrun, because we can't compare the sizes of the
; two. Therefore, this function should not raise verifier errors.
-; CHECK-NOT: experimental_vector_insert
+; CHECK-NOT: vector_insert
define @insert_overrun_scalable_fixed( %vec, <3 x i32> %subvec) {
- %1 = call @llvm.experimental.vector.insert.nxv8i32.v3i32( %vec, <3 x i32> %subvec, i64 6)
+ %1 = call @llvm.vector.insert.nxv8i32.v3i32( %vec, <3 x i32> %subvec, i64 6)
ret %1
}
-declare @llvm.experimental.vector.extract.nxv8i32.nxv3i32(, i64)
-declare @llvm.experimental.vector.insert.nxv8i32.nxv3i32(, , i64)
-declare @llvm.experimental.vector.insert.nxv8i32.v3i32(, <3 x i32>, i64)
-declare <3 x i32> @llvm.experimental.vector.extract.nxv8i32.v3i32(, i64)
-declare <3 x i32> @llvm.experimental.vector.extract.v8i32.v3i32(<8 x i32>, i64)
-declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32>, i64)
-declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v3i32(<8 x i32>, <3 x i32>, i64)
-declare <8 x i32> @llvm.experimental.vector.insert.v8i32.v4i32(<8 x i32>, <4 x i32>, i64)
+declare @llvm.vector.extract.nxv8i32.nxv3i32(, i64)
+declare @llvm.vector.insert.nxv8i32.nxv3i32(, , i64)
+declare @llvm.vector.insert.nxv8i32.v3i32(, <3 x i32>, i64)
+declare <3 x i32> @llvm.vector.extract.nxv8i32.v3i32(, i64)
+declare <3 x i32> @llvm.vector.extract.v8i32.v3i32(<8 x i32>, i64)
+declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32>, i64)
+declare <8 x i32> @llvm.vector.insert.v8i32.v3i32(<8 x i32>, <3 x i32>, i64)
+declare <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32>, <4 x i32>, i64)
diff --git a/llvm/test/Verifier/insert-vector-mismatched-element-types.ll b/llvm/test/Verifier/insert-vector-mismatched-element-types.ll
--- a/llvm/test/Verifier/insert-vector-mismatched-element-types.ll
+++ b/llvm/test/Verifier/insert-vector-mismatched-element-types.ll
@@ -1,9 +1,9 @@
; RUN: not opt -verify -S < %s 2>&1 >/dev/null | FileCheck %s
-; CHECK: experimental_vector_insert parameters must have the same element type.
+; CHECK: vector_insert parameters must have the same element type.
define @invalid_mismatched_element_types( %vec, <4 x i16> %subvec) nounwind {
- %retval = call @llvm.experimental.vector.insert.nxv16i8.v4i16( %vec, <4 x i16> %subvec, i64 0)
+ %retval = call @llvm.vector.insert.nxv16i8.v4i16( %vec, <4 x i16> %subvec, i64 0)
ret %retval
}
-declare @llvm.experimental.vector.insert.nxv16i8.v4i16(, <4 x i16>, i64)
+declare @llvm.vector.insert.nxv16i8.v4i16(, <4 x i16>, i64)
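Note (illustrative only, not part of the patch above): the following is a minimal standalone sketch of the renamed intrinsics as they would be written after this change lands. The function name @roundtrip and the chosen vector widths are assumptions made for this example, not anything taken from the patch; the snippet can be sanity-checked with opt -verify -S in the same way as the RUN lines in the Verifier tests above.

; Insert a fixed-length <4 x i32> chunk at element offset 0 of a scalable
; vector, then extract it back out again.
define <4 x i32> @roundtrip(<vscale x 4 x i32> %acc, <4 x i32> %chunk) {
  %ins = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %acc, <4 x i32> %chunk, i64 0)
  %ext = call <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %ins, i64 0)
  ret <4 x i32> %ext
}

declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <4 x i32> @llvm.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32>, i64)

Index 0 is used here because, as the Verifier tests above check, the index must be a constant multiple of the fixed subvector's known minimum length and must not provably overrun the containing vector.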