diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -497,7 +497,9 @@
   // 4. Width of vector arguments and return types for this function.
   // 5. Width of vector aguments and return types for functions called by this
   //    function.
-  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
+  if (getContext().getTargetInfo().getTriple().isX86())
+    CurFn->addFnAttr("min-legal-vector-width",
+                     llvm::utostr(LargestVectorWidth));

   // Add vscale_range attribute if appropriate.
   Optional> VScaleRange =
diff --git a/clang/test/CodeGen/aarch64-neon-3v.c b/clang/test/CodeGen/aarch64-neon-3v.c
--- a/clang/test/CodeGen/aarch64-neon-3v.c
+++ b/clang/test/CodeGen/aarch64-neon-3v.c
@@ -11,7 +11,7 @@
   return vand_s8(a, b);
 }

-// CHECK-LABEL: define{{.*}} <16 x i8> @test_vandq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 {
+// CHECK-LABEL: define{{.*}} <16 x i8> @test_vandq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 {
 // CHECK: [[AND_I:%.*]] = and <16 x i8> %a, %b
 // CHECK: ret <16 x i8> [[AND_I]]
 int8x16_t test_vandq_s8(int8x16_t a, int8x16_t b) {
@@ -25,7 +25,7 @@
   return vand_s16(a, b);
 }

-// CHECK-LABEL: define{{.*}} <8 x i16> @test_vandq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 {
+// CHECK-LABEL: define{{.*}} <8 x i16> @test_vandq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 {
 // CHECK: [[AND_I:%.*]] = and <8 x i16> %a, %b
 // CHECK: ret <8 x i16> [[AND_I]]
 int16x8_t test_vandq_s16(int16x8_t a, int16x8_t b) {
@@ -39,7 +39,7 @@
   return vand_s32(a, b);
 }

-// CHECK-LABEL: define{{.*}} <4 x i32> @test_vandq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 {
+// CHECK-LABEL: define{{.*}} <4 x i32> @test_vandq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 {
 // CHECK: [[AND_I:%.*]] = and <4 x i32> %a, %b
 // CHECK: ret <4 x i32> [[AND_I]]
 int32x4_t test_vandq_s32(int32x4_t a, int32x4_t b) {
@@ -53,7 +53,7 @@
   return vand_s64(a, b);
 }

-// CHECK-LABEL: define{{.*}} <2 x i64> @test_vandq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 {
+// CHECK-LABEL: define{{.*}} <2 x i64> @test_vandq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 {
 // CHECK: [[AND_I:%.*]] = and <2 x i64> %a, %b
 // CHECK: ret <2 x i64> [[AND_I]]
 int64x2_t test_vandq_s64(int64x2_t a, int64x2_t b) {
@@ -67,7 +67,7 @@
   return vand_u8(a, b);
 }

-// CHECK-LABEL: define{{.*}} <16 x i8> @test_vandq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 {
+// CHECK-LABEL: define{{.*}} <16 x i8> @test_vandq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 {
 // CHECK: [[AND_I:%.*]] = and <16 x i8> %a, %b
 // CHECK: ret <16 x i8> [[AND_I]]
 uint8x16_t test_vandq_u8(uint8x16_t a, uint8x16_t b) {
@@ -81,7 +81,7 @@
   return vand_u16(a, b);
 }

-// CHECK-LABEL: define{{.*}} <8 x i16> @test_vandq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 {
+// CHECK-LABEL: define{{.*}} <8 x i16> @test_vandq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 {
 // CHECK: [[AND_I:%.*]] = and <8 x i16> %a, %b
 // CHECK: ret <8 x i16> [[AND_I]]
 uint16x8_t test_vandq_u16(uint16x8_t a, uint16x8_t b) {
@@ -95,7 +95,7 @@
   return vand_u32(a, b);
 }

-// CHECK-LABEL: define{{.*}} <4 x i32> @test_vandq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 {
+// CHECK-LABEL: define{{.*}} <4 x i32> @test_vandq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 {
 // CHECK: [[AND_I:%.*]] = and <4 x i32> %a, %b
 // CHECK: ret <4 x i32> [[AND_I]]
 uint32x4_t test_vandq_u32(uint32x4_t a, uint32x4_t b) {
@@ -109,7 +109,7 @@
return vand_u64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vandq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vandq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[AND_I:%.*]] = and <2 x i64> %a, %b // CHECK: ret <2 x i64> [[AND_I]] uint64x2_t test_vandq_u64(uint64x2_t a, uint64x2_t b) { @@ -123,7 +123,7 @@ return vorr_s8(a, b); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vorrq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vorrq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <16 x i8> %a, %b // CHECK: ret <16 x i8> [[OR_I]] int8x16_t test_vorrq_s8(int8x16_t a, int8x16_t b) { @@ -137,7 +137,7 @@ return vorr_s16(a, b); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vorrq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vorrq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <8 x i16> %a, %b // CHECK: ret <8 x i16> [[OR_I]] int16x8_t test_vorrq_s16(int16x8_t a, int16x8_t b) { @@ -151,7 +151,7 @@ return vorr_s32(a, b); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vorrq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vorrq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <4 x i32> %a, %b // CHECK: ret <4 x i32> [[OR_I]] int32x4_t test_vorrq_s32(int32x4_t a, int32x4_t b) { @@ -165,7 +165,7 @@ return vorr_s64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vorrq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vorrq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <2 x i64> %a, %b // CHECK: ret <2 x i64> [[OR_I]] int64x2_t test_vorrq_s64(int64x2_t a, int64x2_t b) { @@ -179,7 +179,7 @@ return vorr_u8(a, b); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vorrq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vorrq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <16 x i8> %a, %b // CHECK: ret <16 x i8> [[OR_I]] uint8x16_t test_vorrq_u8(uint8x16_t a, uint8x16_t b) { @@ -193,7 +193,7 @@ return vorr_u16(a, b); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vorrq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vorrq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <8 x i16> %a, %b // CHECK: ret <8 x i16> [[OR_I]] uint16x8_t test_vorrq_u16(uint16x8_t a, uint16x8_t b) { @@ -207,7 +207,7 @@ return vorr_u32(a, b); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vorrq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vorrq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <4 x i32> %a, %b // CHECK: ret <4 x i32> [[OR_I]] uint32x4_t test_vorrq_u32(uint32x4_t a, uint32x4_t b) { @@ -221,7 +221,7 @@ return vorr_u64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vorrq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vorrq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[OR_I:%.*]] = or <2 x i64> %a, %b // CHECK: ret <2 x i64> [[OR_I]] uint64x2_t test_vorrq_u64(uint64x2_t a, uint64x2_t b) { @@ -235,7 +235,7 @@ return veor_s8(a, b); } -// CHECK-LABEL: define{{.*}} <16 x i8> 
@test_veorq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_veorq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <16 x i8> %a, %b // CHECK: ret <16 x i8> [[XOR_I]] int8x16_t test_veorq_s8(int8x16_t a, int8x16_t b) { @@ -249,7 +249,7 @@ return veor_s16(a, b); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_veorq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_veorq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <8 x i16> %a, %b // CHECK: ret <8 x i16> [[XOR_I]] int16x8_t test_veorq_s16(int16x8_t a, int16x8_t b) { @@ -263,7 +263,7 @@ return veor_s32(a, b); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_veorq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_veorq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <4 x i32> %a, %b // CHECK: ret <4 x i32> [[XOR_I]] int32x4_t test_veorq_s32(int32x4_t a, int32x4_t b) { @@ -277,7 +277,7 @@ return veor_s64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_veorq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_veorq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <2 x i64> %a, %b // CHECK: ret <2 x i64> [[XOR_I]] int64x2_t test_veorq_s64(int64x2_t a, int64x2_t b) { @@ -291,7 +291,7 @@ return veor_u8(a, b); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_veorq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_veorq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <16 x i8> %a, %b // CHECK: ret <16 x i8> [[XOR_I]] uint8x16_t test_veorq_u8(uint8x16_t a, uint8x16_t b) { @@ -305,7 +305,7 @@ return veor_u16(a, b); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_veorq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_veorq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <8 x i16> %a, %b // CHECK: ret <8 x i16> [[XOR_I]] uint16x8_t test_veorq_u16(uint16x8_t a, uint16x8_t b) { @@ -319,7 +319,7 @@ return veor_u32(a, b); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_veorq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_veorq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <4 x i32> %a, %b // CHECK: ret <4 x i32> [[XOR_I]] uint32x4_t test_veorq_u32(uint32x4_t a, uint32x4_t b) { @@ -333,7 +333,7 @@ return veor_u64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_veorq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_veorq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[XOR_I:%.*]] = xor <2 x i64> %a, %b // CHECK: ret <2 x i64> [[XOR_I]] uint64x2_t test_veorq_u64(uint64x2_t a, uint64x2_t b) { @@ -348,7 +348,7 @@ return vbic_s8(a, b); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vbicq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vbicq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <16 x i8> %b, // CHECK: [[AND_I:%.*]] = and <16 x i8> %a, [[NEG_I]] // CHECK: ret <16 x i8> [[AND_I]] @@ -364,7 +364,7 @@ return vbic_s16(a, b); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vbicq_s16(<8 x i16> noundef %a, <8 x i16> 
noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vbicq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <8 x i16> %b, // CHECK: [[AND_I:%.*]] = and <8 x i16> %a, [[NEG_I]] // CHECK: ret <8 x i16> [[AND_I]] @@ -380,7 +380,7 @@ return vbic_s32(a, b); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vbicq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vbicq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <4 x i32> %b, // CHECK: [[AND_I:%.*]] = and <4 x i32> %a, [[NEG_I]] // CHECK: ret <4 x i32> [[AND_I]] @@ -396,7 +396,7 @@ return vbic_s64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vbicq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vbicq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <2 x i64> %b, // CHECK: [[AND_I:%.*]] = and <2 x i64> %a, [[NEG_I]] // CHECK: ret <2 x i64> [[AND_I]] @@ -412,7 +412,7 @@ return vbic_u8(a, b); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vbicq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vbicq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <16 x i8> %b, // CHECK: [[AND_I:%.*]] = and <16 x i8> %a, [[NEG_I]] // CHECK: ret <16 x i8> [[AND_I]] @@ -428,7 +428,7 @@ return vbic_u16(a, b); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vbicq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vbicq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <8 x i16> %b, // CHECK: [[AND_I:%.*]] = and <8 x i16> %a, [[NEG_I]] // CHECK: ret <8 x i16> [[AND_I]] @@ -444,7 +444,7 @@ return vbic_u32(a, b); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vbicq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vbicq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <4 x i32> %b, // CHECK: [[AND_I:%.*]] = and <4 x i32> %a, [[NEG_I]] // CHECK: ret <4 x i32> [[AND_I]] @@ -460,7 +460,7 @@ return vbic_u64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vbicq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vbicq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <2 x i64> %b, // CHECK: [[AND_I:%.*]] = and <2 x i64> %a, [[NEG_I]] // CHECK: ret <2 x i64> [[AND_I]] @@ -476,7 +476,7 @@ return vorn_s8(a, b); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vornq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vornq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <16 x i8> %b, // CHECK: [[OR_I:%.*]] = or <16 x i8> %a, [[NEG_I]] // CHECK: ret <16 x i8> [[OR_I]] @@ -492,7 +492,7 @@ return vorn_s16(a, b); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vornq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vornq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <8 x i16> %b, // CHECK: [[OR_I:%.*]] = or <8 x i16> %a, [[NEG_I]] // CHECK: ret <8 x i16> [[OR_I]] @@ -508,7 +508,7 @@ return vorn_s32(a, b); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vornq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vornq_s32(<4 x i32> 
noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <4 x i32> %b, // CHECK: [[OR_I:%.*]] = or <4 x i32> %a, [[NEG_I]] // CHECK: ret <4 x i32> [[OR_I]] @@ -524,7 +524,7 @@ return vorn_s64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vornq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vornq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <2 x i64> %b, // CHECK: [[OR_I:%.*]] = or <2 x i64> %a, [[NEG_I]] // CHECK: ret <2 x i64> [[OR_I]] @@ -540,7 +540,7 @@ return vorn_u8(a, b); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vornq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vornq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <16 x i8> %b, // CHECK: [[OR_I:%.*]] = or <16 x i8> %a, [[NEG_I]] // CHECK: ret <16 x i8> [[OR_I]] @@ -556,7 +556,7 @@ return vorn_u16(a, b); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vornq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vornq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <8 x i16> %b, // CHECK: [[OR_I:%.*]] = or <8 x i16> %a, [[NEG_I]] // CHECK: ret <8 x i16> [[OR_I]] @@ -572,7 +572,7 @@ return vorn_u32(a, b); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vornq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vornq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <4 x i32> %b, // CHECK: [[OR_I:%.*]] = or <4 x i32> %a, [[NEG_I]] // CHECK: ret <4 x i32> [[OR_I]] @@ -588,13 +588,10 @@ return vorn_u64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vornq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vornq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[NEG_I:%.*]] = xor <2 x i64> %b, // CHECK: [[OR_I:%.*]] = or <2 x i64> %a, [[NEG_I]] // CHECK: ret <2 x i64> [[OR_I]] uint64x2_t test_vornq_u64(uint64x2_t a, uint64x2_t b) { return vornq_u64(a, b); } - -// CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="64" -// CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="128" diff --git a/clang/test/CodeGen/aarch64-neon-extract.c b/clang/test/CodeGen/aarch64-neon-extract.c --- a/clang/test/CodeGen/aarch64-neon-extract.c +++ b/clang/test/CodeGen/aarch64-neon-extract.c @@ -45,14 +45,14 @@ return vext_s64(a, b, 0); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vextq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vextq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> // CHECK: ret <16 x i8> [[VEXT]] int8x16_t test_vextq_s8(int8x16_t a, int8x16_t b) { return vextq_s8(a, b, 2); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vextq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vextq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> @@ -63,7 +63,7 @@ return vextq_s16(a, b, 3); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vextq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vextq_s32(<4 x i32> 
noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> @@ -74,7 +74,7 @@ return vextq_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vextq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vextq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> @@ -125,14 +125,14 @@ return vext_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vextq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vextq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> // CHECK: ret <16 x i8> [[VEXT]] uint8x16_t test_vextq_u8(uint8x16_t a, uint8x16_t b) { return vextq_u8(a, b, 2); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vextq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vextq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> @@ -143,7 +143,7 @@ return vextq_u16(a, b, 3); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vextq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vextq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32> @@ -154,7 +154,7 @@ return vextq_u32(a, b, 1); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vextq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vextq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> @@ -187,7 +187,7 @@ return vext_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} <4 x float> @test_vextq_f32(<4 x float> noundef %a, <4 x float> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x float> @test_vextq_f32(<4 x float> noundef %a, <4 x float> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float> @@ -198,7 +198,7 @@ return vextq_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} <2 x double> @test_vextq_f64(<2 x double> noundef %a, <2 x double> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x double> @test_vextq_f64(<2 x double> noundef %a, <2 x double> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double> @@ -227,14 +227,14 @@ return vext_p16(a, b, 3); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vextq_p8(<16 x i8> noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vextq_p8(<16 x i8> noundef %a, <16 
x i8> noundef %b) #0 { // CHECK: [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> // CHECK: ret <16 x i8> [[VEXT]] poly8x16_t test_vextq_p8(poly8x16_t a, poly8x16_t b) { return vextq_p8(a, b, 2); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vextq_p16(<8 x i16> noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vextq_p16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16> diff --git a/clang/test/CodeGen/aarch64-neon-ldst-one.c b/clang/test/CodeGen/aarch64-neon-ldst-one.c --- a/clang/test/CodeGen/aarch64-neon-ldst-one.c +++ b/clang/test/CodeGen/aarch64-neon-ldst-one.c @@ -154,7 +154,7 @@ return vld1q_dup_p64(a); } -// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_dup_u8(i8* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_dup_u8(i8* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 [[TMP0]], i32 0 // CHECK: [[LANE:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP1]], <8 x i32> zeroinitializer @@ -163,7 +163,7 @@ return vld1_dup_u8(a); } -// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_dup_u16(i16* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_dup_u16(i16* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16* // CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]] @@ -174,7 +174,7 @@ return vld1_dup_u16(a); } -// CHECK-LABEL: define{{.*}} <2 x i32> @test_vld1_dup_u32(i32* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <2 x i32> @test_vld1_dup_u32(i32* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32* // CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]] @@ -185,7 +185,7 @@ return vld1_dup_u32(a); } -// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_dup_u64(i64* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_dup_u64(i64* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64* // CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]] @@ -196,7 +196,7 @@ return vld1_dup_u64(a); } -// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_dup_s8(i8* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_dup_s8(i8* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 [[TMP0]], i32 0 // CHECK: [[LANE:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP1]], <8 x i32> zeroinitializer @@ -205,7 +205,7 @@ return vld1_dup_s8(a); } -// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_dup_s16(i16* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_dup_s16(i16* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16* // CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]] @@ -216,7 +216,7 @@ return vld1_dup_s16(a); } -// CHECK-LABEL: define{{.*}} <2 x i32> @test_vld1_dup_s32(i32* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <2 x i32> @test_vld1_dup_s32(i32* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32* // CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]] @@ -227,7 +227,7 @@ return vld1_dup_s32(a); } -// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_dup_s64(i64* 
noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_dup_s64(i64* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64* // CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]] @@ -238,7 +238,7 @@ return vld1_dup_s64(a); } -// CHECK-LABEL: define{{.*}} <4 x half> @test_vld1_dup_f16(half* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <4 x half> @test_vld1_dup_f16(half* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half* // CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]] @@ -249,7 +249,7 @@ return vld1_dup_f16(a); } -// CHECK-LABEL: define{{.*}} <2 x float> @test_vld1_dup_f32(float* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <2 x float> @test_vld1_dup_f32(float* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to float* // CHECK: [[TMP2:%.*]] = load float, float* [[TMP1]] @@ -260,7 +260,7 @@ return vld1_dup_f32(a); } -// CHECK-LABEL: define{{.*}} <1 x double> @test_vld1_dup_f64(double* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <1 x double> @test_vld1_dup_f64(double* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to double* // CHECK: [[TMP2:%.*]] = load double, double* [[TMP1]] @@ -271,7 +271,7 @@ return vld1_dup_f64(a); } -// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_dup_p8(i8* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_dup_p8(i8* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[TMP1:%.*]] = insertelement <8 x i8> poison, i8 [[TMP0]], i32 0 // CHECK: [[LANE:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP1]], <8 x i32> zeroinitializer @@ -280,7 +280,7 @@ return vld1_dup_p8(a); } -// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_dup_p16(i16* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_dup_p16(i16* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16* // CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]] @@ -291,7 +291,7 @@ return vld1_dup_p16(a); } -// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_dup_p64(i64* noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_dup_p64(i64* noundef %a) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64* // CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]] @@ -302,7 +302,7 @@ return vld1_dup_p64(a); } -// CHECK-LABEL: define{{.*}} %struct.uint64x2x2_t @test_vld2q_dup_u64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint64x2x2_t @test_vld2q_dup_u64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x2x2_t* [[__RET]] to i8* @@ -320,7 +320,7 @@ return vld2q_dup_u64(a); } -// CHECK-LABEL: define{{.*}} %struct.int64x2x2_t @test_vld2q_dup_s64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.int64x2x2_t @test_vld2q_dup_s64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.int64x2x2_t* [[__RET]] to i8* @@ -338,7 +338,7 @@ return vld2q_dup_s64(a); } -// CHECK-LABEL: define{{.*}} %struct.float64x2x2_t @test_vld2q_dup_f64(double* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} 
%struct.float64x2x2_t @test_vld2q_dup_f64(double* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x2x2_t* [[__RET]] to i8* @@ -356,7 +356,7 @@ return vld2q_dup_f64(a); } -// CHECK-LABEL: define{{.*}} %struct.poly64x2x2_t @test_vld2q_dup_p64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x2x2_t @test_vld2q_dup_p64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x2_t* [[__RET]] to i8* @@ -374,7 +374,7 @@ return vld2q_dup_p64(a); } -// CHECK-LABEL: define{{.*}} %struct.float64x1x2_t @test_vld2_dup_f64(double* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x1x2_t @test_vld2_dup_f64(double* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x1x2_t* [[__RET]] to i8* @@ -392,7 +392,7 @@ return vld2_dup_f64(a); } -// CHECK-LABEL: define{{.*}} %struct.poly64x1x2_t @test_vld2_dup_p64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x1x2_t @test_vld2_dup_p64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x2_t* [[__RET]] to i8* @@ -410,7 +410,7 @@ return vld2_dup_p64(a); } -// CHECK-LABEL: define{{.*}} %struct.uint64x2x3_t @test_vld3q_dup_u64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint64x2x3_t @test_vld3q_dup_u64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x2x3_t* [[__RET]] to i8* @@ -429,7 +429,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define{{.*}} %struct.int64x2x3_t @test_vld3q_dup_s64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.int64x2x3_t @test_vld3q_dup_s64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.int64x2x3_t* [[__RET]] to i8* @@ -448,7 +448,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define{{.*}} %struct.float64x2x3_t @test_vld3q_dup_f64(double* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x2x3_t @test_vld3q_dup_f64(double* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x2x3_t* [[__RET]] to i8* @@ -467,7 +467,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define{{.*}} %struct.poly64x2x3_t @test_vld3q_dup_p64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x2x3_t @test_vld3q_dup_p64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x3_t* [[__RET]] to i8* @@ -486,7 +486,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define{{.*}} %struct.float64x1x3_t @test_vld3_dup_f64(double* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x1x3_t @test_vld3_dup_f64(double* noundef %a) #0 { // CHECK: 
[[RETVAL:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x1x3_t* [[__RET]] to i8* @@ -505,7 +505,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define{{.*}} %struct.poly64x1x3_t @test_vld3_dup_p64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x1x3_t @test_vld3_dup_p64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x3_t* [[__RET]] to i8* @@ -524,7 +524,7 @@ // [{{x[0-9]+|sp}}] } -// CHECK-LABEL: define{{.*}} %struct.uint64x2x4_t @test_vld4q_dup_u64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint64x2x4_t @test_vld4q_dup_u64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x2x4_t* [[__RET]] to i8* @@ -542,7 +542,7 @@ return vld4q_dup_u64(a); } -// CHECK-LABEL: define{{.*}} %struct.int64x2x4_t @test_vld4q_dup_s64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.int64x2x4_t @test_vld4q_dup_s64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.int64x2x4_t* [[__RET]] to i8* @@ -560,7 +560,7 @@ return vld4q_dup_s64(a); } -// CHECK-LABEL: define{{.*}} %struct.float64x2x4_t @test_vld4q_dup_f64(double* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x2x4_t @test_vld4q_dup_f64(double* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x2x4_t* [[__RET]] to i8* @@ -578,7 +578,7 @@ return vld4q_dup_f64(a); } -// CHECK-LABEL: define{{.*}} %struct.poly64x2x4_t @test_vld4q_dup_p64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x2x4_t @test_vld4q_dup_p64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x4_t* [[__RET]] to i8* @@ -596,7 +596,7 @@ return vld4q_dup_p64(a); } -// CHECK-LABEL: define{{.*}} %struct.float64x1x4_t @test_vld4_dup_f64(double* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x1x4_t @test_vld4_dup_f64(double* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.float64x1x4_t* [[__RET]] to i8* @@ -614,7 +614,7 @@ return vld4_dup_f64(a); } -// CHECK-LABEL: define{{.*}} %struct.poly64x1x4_t @test_vld4_dup_p64(i64* noundef %a) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x1x4_t @test_vld4_dup_p64(i64* noundef %a) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x4_t* [[__RET]] to i8* @@ -788,7 +788,7 @@ return vld1q_lane_p64(a, b, 1); } -// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_lane_u8(i8* noundef %a, <8 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_lane_u8(i8* noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[VLD1_LANE:%.*]] = 
insertelement <8 x i8> %b, i8 [[TMP0]], i32 7 // CHECK: ret <8 x i8> [[VLD1_LANE]] @@ -796,7 +796,7 @@ return vld1_lane_u8(a, b, 7); } -// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_lane_u16(i16* noundef %a, <4 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_lane_u16(i16* noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -808,7 +808,7 @@ return vld1_lane_u16(a, b, 3); } -// CHECK-LABEL: define{{.*}} <2 x i32> @test_vld1_lane_u32(i32* noundef %a, <2 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i32> @test_vld1_lane_u32(i32* noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> @@ -820,7 +820,7 @@ return vld1_lane_u32(a, b, 1); } -// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_lane_u64(i64* noundef %a, <1 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_lane_u64(i64* noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -832,7 +832,7 @@ return vld1_lane_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_lane_s8(i8* noundef %a, <8 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_lane_s8(i8* noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i8> %b, i8 [[TMP0]], i32 7 // CHECK: ret <8 x i8> [[VLD1_LANE]] @@ -840,7 +840,7 @@ return vld1_lane_s8(a, b, 7); } -// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_lane_s16(i16* noundef %a, <4 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_lane_s16(i16* noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -852,7 +852,7 @@ return vld1_lane_s16(a, b, 3); } -// CHECK-LABEL: define{{.*}} <2 x i32> @test_vld1_lane_s32(i32* noundef %a, <2 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i32> @test_vld1_lane_s32(i32* noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> @@ -864,7 +864,7 @@ return vld1_lane_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_lane_s64(i64* noundef %a, <1 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_lane_s64(i64* noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -876,7 +876,7 @@ return vld1_lane_s64(a, b, 0); } -// CHECK-LABEL: define{{.*}} <4 x half> @test_vld1_lane_f16(half* noundef %a, <4 x half> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x half> @test_vld1_lane_f16(half* noundef %a, <4 x half> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half> @@ -888,7 +888,7 @@ return 
vld1_lane_f16(a, b, 3); } -// CHECK-LABEL: define{{.*}} <2 x float> @test_vld1_lane_f32(float* noundef %a, <2 x float> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x float> @test_vld1_lane_f32(float* noundef %a, <2 x float> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float> @@ -900,7 +900,7 @@ return vld1_lane_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} <1 x double> @test_vld1_lane_f64(double* noundef %a, <1 x double> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <1 x double> @test_vld1_lane_f64(double* noundef %a, <1 x double> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> @@ -912,7 +912,7 @@ return vld1_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_lane_p8(i8* noundef %a, <8 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i8> @test_vld1_lane_p8(i8* noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = load i8, i8* %a // CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i8> %b, i8 [[TMP0]], i32 7 // CHECK: ret <8 x i8> [[VLD1_LANE]] @@ -920,7 +920,7 @@ return vld1_lane_p8(a, b, 7); } -// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_lane_p16(i16* noundef %a, <4 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i16> @test_vld1_lane_p16(i16* noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -932,7 +932,7 @@ return vld1_lane_p16(a, b, 3); } -// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_lane_p64(i64* noundef %a, <1 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <1 x i64> @test_vld1_lane_p64(i64* noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -944,7 +944,7 @@ return vld1_lane_p64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.int8x16x2_t @test_vld2q_lane_s8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int8x16x2_t @test_vld2q_lane_s8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int8x16x2_t, align 16 @@ -973,7 +973,7 @@ return vld2q_lane_s8(ptr, src, 15); } -// CHECK-LABEL: define{{.*}} %struct.uint8x16x2_t @test_vld2q_lane_u8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint8x16x2_t @test_vld2q_lane_u8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint8x16x2_t, align 16 @@ -1002,7 +1002,7 @@ return vld2q_lane_u8(ptr, src, 15); } -// CHECK-LABEL: define{{.*}} %struct.poly8x16x2_t @test_vld2q_lane_p8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly8x16x2_t @test_vld2q_lane_p8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.poly8x16x2_t, 
align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly8x16x2_t, align 16 @@ -1031,7 +1031,7 @@ return vld2q_lane_p8(ptr, src, 15); } -// CHECK-LABEL: define{{.*}} %struct.int8x16x3_t @test_vld3q_lane_s8(i8* noundef %ptr, [3 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int8x16x3_t @test_vld3q_lane_s8(i8* noundef %ptr, [3 x <16 x i8>] %src.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int8x16x3_t, align 16 @@ -1063,7 +1063,7 @@ return vld3q_lane_s8(ptr, src, 15); } -// CHECK-LABEL: define{{.*}} %struct.uint8x16x3_t @test_vld3q_lane_u8(i8* noundef %ptr, [3 x <16 x i8>] %src.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint8x16x3_t @test_vld3q_lane_u8(i8* noundef %ptr, [3 x <16 x i8>] %src.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[SRC:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint8x16x3_t, align 16 @@ -1095,7 +1095,7 @@ return vld3q_lane_u8(ptr, src, 15); } -// CHECK-LABEL: define{{.*}} %struct.uint16x8x2_t @test_vld2q_lane_u16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint16x8x2_t @test_vld2q_lane_u16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x2_t, align 16 @@ -1129,7 +1129,7 @@ return vld2q_lane_u16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.uint32x4x2_t @test_vld2q_lane_u32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint32x4x2_t @test_vld2q_lane_u32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x2_t, align 16 @@ -1163,7 +1163,7 @@ return vld2q_lane_u32(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.uint64x2x2_t @test_vld2q_lane_u64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint64x2x2_t @test_vld2q_lane_u64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x2_t, align 16 @@ -1197,7 +1197,7 @@ return vld2q_lane_u64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.int16x8x2_t @test_vld2q_lane_s16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int16x8x2_t @test_vld2q_lane_s16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int16x8x2_t, align 16 @@ -1231,7 +1231,7 @@ return vld2q_lane_s16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.int32x4x2_t @test_vld2q_lane_s32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int32x4x2_t @test_vld2q_lane_s32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int32x4x2_t, align 16 @@ -1265,7 +1265,7 @@ return vld2q_lane_s32(a, b, 3); } -// 
CHECK-LABEL: define{{.*}} %struct.int64x2x2_t @test_vld2q_lane_s64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int64x2x2_t @test_vld2q_lane_s64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int64x2x2_t, align 16 @@ -1299,7 +1299,7 @@ return vld2q_lane_s64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.float16x8x2_t @test_vld2q_lane_f16(half* noundef %a, [2 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float16x8x2_t @test_vld2q_lane_f16(half* noundef %a, [2 x <8 x half>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float16x8x2_t, align 16 @@ -1333,7 +1333,7 @@ return vld2q_lane_f16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.float32x4x2_t @test_vld2q_lane_f32(float* noundef %a, [2 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float32x4x2_t @test_vld2q_lane_f32(float* noundef %a, [2 x <4 x float>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float32x4x2_t, align 16 @@ -1367,7 +1367,7 @@ return vld2q_lane_f32(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.float64x2x2_t @test_vld2q_lane_f64(double* noundef %a, [2 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x2x2_t @test_vld2q_lane_f64(double* noundef %a, [2 x <2 x double>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float64x2x2_t, align 16 @@ -1401,7 +1401,7 @@ return vld2q_lane_f64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.poly16x8x2_t @test_vld2q_lane_p16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly16x8x2_t @test_vld2q_lane_p16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x2_t, align 16 @@ -1435,7 +1435,7 @@ return vld2q_lane_p16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.poly64x2x2_t @test_vld2q_lane_p64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x2x2_t @test_vld2q_lane_p64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x2_t, align 16 @@ -1469,7 +1469,7 @@ return vld2q_lane_p64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.uint8x8x2_t @test_vld2_lane_u8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint8x8x2_t @test_vld2_lane_u8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint8x8x2_t, align 8 @@ -1498,7 +1498,7 @@ return vld2_lane_u8(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.uint16x4x2_t @test_vld2_lane_u16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { +// 
CHECK-LABEL: define{{.*}} %struct.uint16x4x2_t @test_vld2_lane_u16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint16x4x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint16x4x2_t, align 8 @@ -1532,7 +1532,7 @@ return vld2_lane_u16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.uint32x2x2_t @test_vld2_lane_u32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint32x2x2_t @test_vld2_lane_u32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint32x2x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint32x2x2_t, align 8 @@ -1566,7 +1566,7 @@ return vld2_lane_u32(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.uint64x1x2_t @test_vld2_lane_u64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint64x1x2_t @test_vld2_lane_u64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x1x2_t, align 8 @@ -1600,7 +1600,7 @@ return vld2_lane_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.int8x8x2_t @test_vld2_lane_s8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int8x8x2_t @test_vld2_lane_s8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int8x8x2_t, align 8 @@ -1629,7 +1629,7 @@ return vld2_lane_s8(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.int16x4x2_t @test_vld2_lane_s16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int16x4x2_t @test_vld2_lane_s16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int16x4x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int16x4x2_t, align 8 @@ -1663,7 +1663,7 @@ return vld2_lane_s16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.int32x2x2_t @test_vld2_lane_s32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int32x2x2_t @test_vld2_lane_s32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int32x2x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int32x2x2_t, align 8 @@ -1697,7 +1697,7 @@ return vld2_lane_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.int64x1x2_t @test_vld2_lane_s64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int64x1x2_t @test_vld2_lane_s64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int64x1x2_t, align 8 @@ -1731,7 +1731,7 @@ return vld2_lane_s64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.float16x4x2_t @test_vld2_lane_f16(half* noundef %a, [2 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float16x4x2_t @test_vld2_lane_f16(half* noundef %a, [2 x <4 x half>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8 // CHECK: [[B:%.*]] = alloca 
%struct.float16x4x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float16x4x2_t, align 8 @@ -1765,7 +1765,7 @@ return vld2_lane_f16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.float32x2x2_t @test_vld2_lane_f32(float* noundef %a, [2 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float32x2x2_t @test_vld2_lane_f32(float* noundef %a, [2 x <2 x float>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float32x2x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float32x2x2_t, align 8 @@ -1799,7 +1799,7 @@ return vld2_lane_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.float64x1x2_t @test_vld2_lane_f64(double* noundef %a, [2 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x1x2_t @test_vld2_lane_f64(double* noundef %a, [2 x <1 x double>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float64x1x2_t, align 8 @@ -1833,7 +1833,7 @@ return vld2_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.poly8x8x2_t @test_vld2_lane_p8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly8x8x2_t @test_vld2_lane_p8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly8x8x2_t, align 8 @@ -1862,7 +1862,7 @@ return vld2_lane_p8(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.poly16x4x2_t @test_vld2_lane_p16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly16x4x2_t @test_vld2_lane_p16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly16x4x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly16x4x2_t, align 8 @@ -1896,7 +1896,7 @@ return vld2_lane_p16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.poly64x1x2_t @test_vld2_lane_p64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x1x2_t @test_vld2_lane_p64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x2_t, align 8 @@ -1930,7 +1930,7 @@ return vld2_lane_p64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.uint16x8x3_t @test_vld3q_lane_u16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint16x8x3_t @test_vld3q_lane_u16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x3_t, align 16 @@ -1969,7 +1969,7 @@ return vld3q_lane_u16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.uint32x4x3_t @test_vld3q_lane_u32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint32x4x3_t @test_vld3q_lane_u32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x3_t, align 16 @@ -2008,7 +2008,7 @@ return vld3q_lane_u32(a, b, 3); } -// 
CHECK-LABEL: define{{.*}} %struct.uint64x2x3_t @test_vld3q_lane_u64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint64x2x3_t @test_vld3q_lane_u64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x3_t, align 16 @@ -2047,7 +2047,7 @@ return vld3q_lane_u64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.int16x8x3_t @test_vld3q_lane_s16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int16x8x3_t @test_vld3q_lane_s16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int16x8x3_t, align 16 @@ -2086,7 +2086,7 @@ return vld3q_lane_s16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.int32x4x3_t @test_vld3q_lane_s32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int32x4x3_t @test_vld3q_lane_s32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int32x4x3_t, align 16 @@ -2125,7 +2125,7 @@ return vld3q_lane_s32(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.int64x2x3_t @test_vld3q_lane_s64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int64x2x3_t @test_vld3q_lane_s64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int64x2x3_t, align 16 @@ -2164,7 +2164,7 @@ return vld3q_lane_s64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.float16x8x3_t @test_vld3q_lane_f16(half* noundef %a, [3 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float16x8x3_t @test_vld3q_lane_f16(half* noundef %a, [3 x <8 x half>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float16x8x3_t, align 16 @@ -2203,7 +2203,7 @@ return vld3q_lane_f16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.float32x4x3_t @test_vld3q_lane_f32(float* noundef %a, [3 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float32x4x3_t @test_vld3q_lane_f32(float* noundef %a, [3 x <4 x float>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float32x4x3_t, align 16 @@ -2242,7 +2242,7 @@ return vld3q_lane_f32(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.float64x2x3_t @test_vld3q_lane_f64(double* noundef %a, [3 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x2x3_t @test_vld3q_lane_f64(double* noundef %a, [3 x <2 x double>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float64x2x3_t, align 16 @@ -2281,7 +2281,7 @@ return vld3q_lane_f64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.poly8x16x3_t @test_vld3q_lane_p8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { +// 
CHECK-LABEL: define{{.*}} %struct.poly8x16x3_t @test_vld3q_lane_p8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly8x16x3_t, align 16 @@ -2313,7 +2313,7 @@ return vld3q_lane_p8(a, b, 15); } -// CHECK-LABEL: define{{.*}} %struct.poly16x8x3_t @test_vld3q_lane_p16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly16x8x3_t @test_vld3q_lane_p16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x3_t, align 16 @@ -2352,7 +2352,7 @@ return vld3q_lane_p16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.poly64x2x3_t @test_vld3q_lane_p64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x2x3_t @test_vld3q_lane_p64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x3_t, align 16 @@ -2391,7 +2391,7 @@ return vld3q_lane_p64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.uint8x8x3_t @test_vld3_lane_u8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint8x8x3_t @test_vld3_lane_u8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint8x8x3_t, align 8 @@ -2423,7 +2423,7 @@ return vld3_lane_u8(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.uint16x4x3_t @test_vld3_lane_u16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint16x4x3_t @test_vld3_lane_u16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint16x4x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint16x4x3_t, align 8 @@ -2462,7 +2462,7 @@ return vld3_lane_u16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.uint32x2x3_t @test_vld3_lane_u32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint32x2x3_t @test_vld3_lane_u32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint32x2x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint32x2x3_t, align 8 @@ -2501,7 +2501,7 @@ return vld3_lane_u32(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.uint64x1x3_t @test_vld3_lane_u64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint64x1x3_t @test_vld3_lane_u64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint64x1x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x1x3_t, align 8 @@ -2540,7 +2540,7 @@ return vld3_lane_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.int8x8x3_t @test_vld3_lane_s8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int8x8x3_t @test_vld3_lane_s8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: 
[[B:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int8x8x3_t, align 8 @@ -2572,7 +2572,7 @@ return vld3_lane_s8(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.int16x4x3_t @test_vld3_lane_s16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int16x4x3_t @test_vld3_lane_s16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int16x4x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int16x4x3_t, align 8 @@ -2611,7 +2611,7 @@ return vld3_lane_s16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.int32x2x3_t @test_vld3_lane_s32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int32x2x3_t @test_vld3_lane_s32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int32x2x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int32x2x3_t, align 8 @@ -2650,7 +2650,7 @@ return vld3_lane_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.int64x1x3_t @test_vld3_lane_s64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int64x1x3_t @test_vld3_lane_s64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int64x1x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int64x1x3_t, align 8 @@ -2689,7 +2689,7 @@ return vld3_lane_s64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.float16x4x3_t @test_vld3_lane_f16(half* noundef %a, [3 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float16x4x3_t @test_vld3_lane_f16(half* noundef %a, [3 x <4 x half>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float16x4x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float16x4x3_t, align 8 @@ -2728,7 +2728,7 @@ return vld3_lane_f16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.float32x2x3_t @test_vld3_lane_f32(float* noundef %a, [3 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float32x2x3_t @test_vld3_lane_f32(float* noundef %a, [3 x <2 x float>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float32x2x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float32x2x3_t, align 8 @@ -2767,7 +2767,7 @@ return vld3_lane_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.float64x1x3_t @test_vld3_lane_f64(double* noundef %a, [3 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x1x3_t @test_vld3_lane_f64(double* noundef %a, [3 x <1 x double>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float64x1x3_t, align 8 @@ -2806,7 +2806,7 @@ return vld3_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.poly8x8x3_t @test_vld3_lane_p8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly8x8x3_t @test_vld3_lane_p8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly8x8x3_t, align 8 @@ -2838,7 +2838,7 @@ return vld3_lane_p8(a, b, 7); } -// 
CHECK-LABEL: define{{.*}} %struct.poly16x4x3_t @test_vld3_lane_p16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly16x4x3_t @test_vld3_lane_p16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly16x4x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly16x4x3_t, align 8 @@ -2877,7 +2877,7 @@ return vld3_lane_p16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.poly64x1x3_t @test_vld3_lane_p64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x1x3_t @test_vld3_lane_p64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x3_t, align 8 @@ -2916,7 +2916,7 @@ return vld3_lane_p64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.uint8x16x4_t @test_vld4q_lane_u8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint8x16x4_t @test_vld4q_lane_u8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint8x16x4_t, align 16 @@ -2951,7 +2951,7 @@ return vld4q_lane_u8(a, b, 15); } -// CHECK-LABEL: define{{.*}} %struct.uint16x8x4_t @test_vld4q_lane_u16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint16x8x4_t @test_vld4q_lane_u16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint16x8x4_t, align 16 @@ -2995,7 +2995,7 @@ return vld4q_lane_u16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.uint32x4x4_t @test_vld4q_lane_u32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint32x4x4_t @test_vld4q_lane_u32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint32x4x4_t, align 16 @@ -3039,7 +3039,7 @@ return vld4q_lane_u32(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.uint64x2x4_t @test_vld4q_lane_u64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint64x2x4_t @test_vld4q_lane_u64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x4_t, align 16 @@ -3083,7 +3083,7 @@ return vld4q_lane_u64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.int8x16x4_t @test_vld4q_lane_s8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int8x16x4_t @test_vld4q_lane_s8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int8x16x4_t, align 16 @@ -3118,7 +3118,7 @@ return vld4q_lane_s8(a, b, 15); } -// CHECK-LABEL: define{{.*}} %struct.int16x8x4_t @test_vld4q_lane_s16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int16x8x4_t 
@test_vld4q_lane_s16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int16x8x4_t, align 16 @@ -3162,7 +3162,7 @@ return vld4q_lane_s16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.int32x4x4_t @test_vld4q_lane_s32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int32x4x4_t @test_vld4q_lane_s32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int32x4x4_t, align 16 @@ -3206,7 +3206,7 @@ return vld4q_lane_s32(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.int64x2x4_t @test_vld4q_lane_s64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int64x2x4_t @test_vld4q_lane_s64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.int64x2x4_t, align 16 @@ -3250,7 +3250,7 @@ return vld4q_lane_s64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.float16x8x4_t @test_vld4q_lane_f16(half* noundef %a, [4 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float16x8x4_t @test_vld4q_lane_f16(half* noundef %a, [4 x <8 x half>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float16x8x4_t, align 16 @@ -3294,7 +3294,7 @@ return vld4q_lane_f16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.float32x4x4_t @test_vld4q_lane_f32(float* noundef %a, [4 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float32x4x4_t @test_vld4q_lane_f32(float* noundef %a, [4 x <4 x float>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float32x4x4_t, align 16 @@ -3338,7 +3338,7 @@ return vld4q_lane_f32(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.float64x2x4_t @test_vld4q_lane_f64(double* noundef %a, [4 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x2x4_t @test_vld4q_lane_f64(double* noundef %a, [4 x <2 x double>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.float64x2x4_t, align 16 @@ -3382,7 +3382,7 @@ return vld4q_lane_f64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.poly8x16x4_t @test_vld4q_lane_p8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly8x16x4_t @test_vld4q_lane_p8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly8x16x4_t, align 16 @@ -3417,7 +3417,7 @@ return vld4q_lane_p8(a, b, 15); } -// CHECK-LABEL: define{{.*}} %struct.poly16x8x4_t @test_vld4q_lane_p16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly16x8x4_t @test_vld4q_lane_p16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca 
%struct.poly16x8x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly16x8x4_t, align 16 @@ -3461,7 +3461,7 @@ return vld4q_lane_p16(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.poly64x2x4_t @test_vld4q_lane_p64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x2x4_t @test_vld4q_lane_p64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[B:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x4_t, align 16 @@ -3505,7 +3505,7 @@ return vld4q_lane_p64(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.uint8x8x4_t @test_vld4_lane_u8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint8x8x4_t @test_vld4_lane_u8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint8x8x4_t, align 8 @@ -3540,7 +3540,7 @@ return vld4_lane_u8(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.uint16x4x4_t @test_vld4_lane_u16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint16x4x4_t @test_vld4_lane_u16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint16x4x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint16x4x4_t, align 8 @@ -3584,7 +3584,7 @@ return vld4_lane_u16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.uint32x2x4_t @test_vld4_lane_u32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint32x2x4_t @test_vld4_lane_u32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint32x2x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint32x2x4_t, align 8 @@ -3628,7 +3628,7 @@ return vld4_lane_u32(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.uint64x1x4_t @test_vld4_lane_u64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.uint64x1x4_t @test_vld4_lane_u64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.uint64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.uint64x1x4_t, align 8 @@ -3672,7 +3672,7 @@ return vld4_lane_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.int8x8x4_t @test_vld4_lane_s8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int8x8x4_t @test_vld4_lane_s8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int8x8x4_t, align 8 @@ -3707,7 +3707,7 @@ return vld4_lane_s8(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.int16x4x4_t @test_vld4_lane_s16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int16x4x4_t @test_vld4_lane_s16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int16x4x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int16x4x4_t, align 8 @@ -3751,7 +3751,7 @@ return vld4_lane_s16(a, b, 3); } -// 
CHECK-LABEL: define{{.*}} %struct.int32x2x4_t @test_vld4_lane_s32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int32x2x4_t @test_vld4_lane_s32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int32x2x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int32x2x4_t, align 8 @@ -3795,7 +3795,7 @@ return vld4_lane_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.int64x1x4_t @test_vld4_lane_s64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.int64x1x4_t @test_vld4_lane_s64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.int64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.int64x1x4_t, align 8 @@ -3839,7 +3839,7 @@ return vld4_lane_s64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.float16x4x4_t @test_vld4_lane_f16(half* noundef %a, [4 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float16x4x4_t @test_vld4_lane_f16(half* noundef %a, [4 x <4 x half>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float16x4x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float16x4x4_t, align 8 @@ -3883,7 +3883,7 @@ return vld4_lane_f16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.float32x2x4_t @test_vld4_lane_f32(float* noundef %a, [4 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float32x2x4_t @test_vld4_lane_f32(float* noundef %a, [4 x <2 x float>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float32x2x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float32x2x4_t, align 8 @@ -3927,7 +3927,7 @@ return vld4_lane_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} %struct.float64x1x4_t @test_vld4_lane_f64(double* noundef %a, [4 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.float64x1x4_t @test_vld4_lane_f64(double* noundef %a, [4 x <1 x double>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.float64x1x4_t, align 8 @@ -3971,7 +3971,7 @@ return vld4_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} %struct.poly8x8x4_t @test_vld4_lane_p8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly8x8x4_t @test_vld4_lane_p8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly8x8x4_t, align 8 @@ -4006,7 +4006,7 @@ return vld4_lane_p8(a, b, 7); } -// CHECK-LABEL: define{{.*}} %struct.poly16x4x4_t @test_vld4_lane_p16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly16x4x4_t @test_vld4_lane_p16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly16x4x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly16x4x4_t, align 8 @@ -4050,7 +4050,7 @@ return vld4_lane_p16(a, b, 3); } -// CHECK-LABEL: define{{.*}} %struct.poly64x1x4_t @test_vld4_lane_p64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x1x4_t 
@test_vld4_lane_p64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[B:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x4_t, align 8 @@ -4250,7 +4250,7 @@ vst1q_lane_p64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_u8(i8* noundef %a, <8 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_u8(i8* noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 // CHECK: store i8 [[TMP0]], i8* %a // CHECK: ret void @@ -4258,7 +4258,7 @@ vst1_lane_u8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_u16(i16* noundef %a, <4 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_u16(i16* noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -4270,7 +4270,7 @@ vst1_lane_u16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_u32(i32* noundef %a, <2 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_u32(i32* noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> @@ -4282,7 +4282,7 @@ vst1_lane_u32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_u64(i64* noundef %a, <1 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_u64(i64* noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -4294,7 +4294,7 @@ vst1_lane_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_s8(i8* noundef %a, <8 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_s8(i8* noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 // CHECK: store i8 [[TMP0]], i8* %a // CHECK: ret void @@ -4302,7 +4302,7 @@ vst1_lane_s8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_s16(i16* noundef %a, <4 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_s16(i16* noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -4314,7 +4314,7 @@ vst1_lane_s16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_s32(i32* noundef %a, <2 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_s32(i32* noundef %a, <2 x i32> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> @@ -4326,7 +4326,7 @@ vst1_lane_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_s64(i64* noundef %a, <1 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_s64(i64* noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -4338,7 +4338,7 @@ vst1_lane_s64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void 
@test_vst1_lane_f16(half* noundef %a, <4 x half> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_f16(half* noundef %a, <4 x half> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half> @@ -4350,7 +4350,7 @@ vst1_lane_f16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_f32(float* noundef %a, <2 x float> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_f32(float* noundef %a, <2 x float> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float> @@ -4362,7 +4362,7 @@ vst1_lane_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_f64(double* noundef %a, <1 x double> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_f64(double* noundef %a, <1 x double> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> @@ -4374,7 +4374,7 @@ vst1_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_p8(i8* noundef %a, <8 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_p8(i8* noundef %a, <8 x i8> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 // CHECK: store i8 [[TMP0]], i8* %a // CHECK: ret void @@ -4382,7 +4382,7 @@ vst1_lane_p8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_p16(i16* noundef %a, <4 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_p16(i16* noundef %a, <4 x i16> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> @@ -4394,7 +4394,7 @@ vst1_lane_p16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst1_lane_p64(i64* noundef %a, <1 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1_lane_p64(i64* noundef %a, <1 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> @@ -4406,7 +4406,7 @@ vst1_lane_p64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_u8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_u8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[B]], i32 0, i32 0 @@ -4426,7 +4426,7 @@ vst2q_lane_u8(a, b, 15); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_u16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_u16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[B]], i32 0, i32 0 @@ -4451,7 +4451,7 @@ vst2q_lane_u16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_u32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { +// 
CHECK-LABEL: define{{.*}} void @test_vst2q_lane_u32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[B]], i32 0, i32 0 @@ -4476,7 +4476,7 @@ vst2q_lane_u32(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_u64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_u64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x2_t, %struct.uint64x2x2_t* [[B]], i32 0, i32 0 @@ -4501,7 +4501,7 @@ vst2q_lane_u64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_s8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_s8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[B]], i32 0, i32 0 @@ -4521,7 +4521,7 @@ vst2q_lane_s8(a, b, 15); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_s16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_s16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[B]], i32 0, i32 0 @@ -4546,7 +4546,7 @@ vst2q_lane_s16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_s32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_s32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[B]], i32 0, i32 0 @@ -4571,7 +4571,7 @@ vst2q_lane_s32(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_s64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_s64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x2_t, %struct.int64x2x2_t* [[B]], i32 0, i32 0 @@ -4596,7 +4596,7 @@ vst2q_lane_s64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_f16(half* noundef %a, [2 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_f16(half* noundef %a, [2 x <8 x half>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[B]], i32 0, i32 0 @@ -4621,7 +4621,7 @@ vst2q_lane_f16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_f32(float* noundef %a, [2 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void 
@test_vst2q_lane_f32(float* noundef %a, [2 x <4 x float>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[B]], i32 0, i32 0 @@ -4646,7 +4646,7 @@ vst2q_lane_f32(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_f64(double* noundef %a, [2 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_f64(double* noundef %a, [2 x <2 x double>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x2_t, %struct.float64x2x2_t* [[B]], i32 0, i32 0 @@ -4671,7 +4671,7 @@ vst2q_lane_f64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_p8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_p8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[B]], i32 0, i32 0 @@ -4691,7 +4691,7 @@ vst2q_lane_p8(a, b, 15); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_p16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_p16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[B]], i32 0, i32 0 @@ -4716,7 +4716,7 @@ vst2q_lane_p16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_p64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_lane_p64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x2_t, %struct.poly64x2x2_t* [[B]], i32 0, i32 0 @@ -4741,7 +4741,7 @@ vst2q_lane_p64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_u8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_u8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x2_t, %struct.uint8x8x2_t* [[B]], i32 0, i32 0 @@ -4761,7 +4761,7 @@ vst2_lane_u8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_u16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_u16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x2_t, %struct.uint16x4x2_t* [[B]], i32 0, i32 0 @@ -4786,7 +4786,7 @@ vst2_lane_u16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_u32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_u32(i32* noundef %a, [2 x <2 x 
i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint32x2x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x2_t, %struct.uint32x2x2_t* [[B]], i32 0, i32 0 @@ -4811,7 +4811,7 @@ vst2_lane_u32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_u64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_u64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x2_t, %struct.uint64x1x2_t* [[B]], i32 0, i32 0 @@ -4836,7 +4836,7 @@ vst2_lane_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_s8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_s8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x2_t, %struct.int8x8x2_t* [[B]], i32 0, i32 0 @@ -4856,7 +4856,7 @@ vst2_lane_s8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_s16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_s16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x2_t, %struct.int16x4x2_t* [[B]], i32 0, i32 0 @@ -4881,7 +4881,7 @@ vst2_lane_s16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_s32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_s32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int32x2x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x2_t, %struct.int32x2x2_t* [[B]], i32 0, i32 0 @@ -4906,7 +4906,7 @@ vst2_lane_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_s64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_s64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x2_t, %struct.int64x1x2_t* [[B]], i32 0, i32 0 @@ -4931,7 +4931,7 @@ vst2_lane_s64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_f16(half* noundef %a, [2 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_f16(half* noundef %a, [2 x <4 x half>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x2_t, %struct.float16x4x2_t* [[B]], i32 0, i32 0 @@ -4956,7 +4956,7 @@ vst2_lane_f16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_f32(float* noundef %a, [2 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_f32(float* noundef %a, [2 x <2 x float>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float32x2x2_t, align 8 // CHECK: 
[[__S1:%.*]] = alloca %struct.float32x2x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x2_t, %struct.float32x2x2_t* [[B]], i32 0, i32 0 @@ -4981,7 +4981,7 @@ vst2_lane_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_f64(double* noundef %a, [2 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_f64(double* noundef %a, [2 x <1 x double>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x2_t, %struct.float64x1x2_t* [[B]], i32 0, i32 0 @@ -5006,7 +5006,7 @@ vst2_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_p8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_p8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x2_t, %struct.poly8x8x2_t* [[B]], i32 0, i32 0 @@ -5026,7 +5026,7 @@ vst2_lane_p8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_p16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_p16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly16x4x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x2_t, %struct.poly16x4x2_t* [[B]], i32 0, i32 0 @@ -5051,7 +5051,7 @@ vst2_lane_p16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst2_lane_p64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_lane_p64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x2_t, %struct.poly64x1x2_t* [[B]], i32 0, i32 0 @@ -5076,7 +5076,7 @@ vst2_lane_p64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_u8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_u8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[B]], i32 0, i32 0 @@ -5099,7 +5099,7 @@ vst3q_lane_u8(a, b, 15); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_u16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_u16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[B]], i32 0, i32 0 @@ -5129,7 +5129,7 @@ vst3q_lane_u16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_u32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_u32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16 // CHECK: 
[[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[B]], i32 0, i32 0 @@ -5159,7 +5159,7 @@ vst3q_lane_u32(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_u64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_u64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x3_t, %struct.uint64x2x3_t* [[B]], i32 0, i32 0 @@ -5189,7 +5189,7 @@ vst3q_lane_u64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_s8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_s8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[B]], i32 0, i32 0 @@ -5212,7 +5212,7 @@ vst3q_lane_s8(a, b, 15); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_s16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_s16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[B]], i32 0, i32 0 @@ -5242,7 +5242,7 @@ vst3q_lane_s16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_s32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_s32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[B]], i32 0, i32 0 @@ -5272,7 +5272,7 @@ vst3q_lane_s32(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_s64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_s64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x3_t, %struct.int64x2x3_t* [[B]], i32 0, i32 0 @@ -5302,7 +5302,7 @@ vst3q_lane_s64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_f16(half* noundef %a, [3 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_f16(half* noundef %a, [3 x <8 x half>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[B]], i32 0, i32 0 @@ -5332,7 +5332,7 @@ vst3q_lane_f16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_f32(float* noundef %a, [3 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_f32(float* noundef %a, [3 x <4 x float>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds 
%struct.float32x4x3_t, %struct.float32x4x3_t* [[B]], i32 0, i32 0 @@ -5362,7 +5362,7 @@ vst3q_lane_f32(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_f64(double* noundef %a, [3 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_f64(double* noundef %a, [3 x <2 x double>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x3_t, %struct.float64x2x3_t* [[B]], i32 0, i32 0 @@ -5392,7 +5392,7 @@ vst3q_lane_f64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_p8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_p8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[B]], i32 0, i32 0 @@ -5415,7 +5415,7 @@ vst3q_lane_p8(a, b, 15); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_p16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_p16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[B]], i32 0, i32 0 @@ -5445,7 +5445,7 @@ vst3q_lane_p16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_p64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_lane_p64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, %struct.poly64x2x3_t* [[B]], i32 0, i32 0 @@ -5475,7 +5475,7 @@ vst3q_lane_p64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_u8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_u8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, %struct.uint8x8x3_t* [[B]], i32 0, i32 0 @@ -5498,7 +5498,7 @@ vst3_lane_u8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_u16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_u16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x3_t, %struct.uint16x4x3_t* [[B]], i32 0, i32 0 @@ -5528,7 +5528,7 @@ vst3_lane_u16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_u32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_u32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint32x2x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x3_t, %struct.uint32x2x3_t* [[B]], i32 0, 
i32 0 @@ -5558,7 +5558,7 @@ vst3_lane_u32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_u64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_u64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x3_t, %struct.uint64x1x3_t* [[B]], i32 0, i32 0 @@ -5588,7 +5588,7 @@ vst3_lane_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_s8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_s8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x3_t, %struct.int8x8x3_t* [[B]], i32 0, i32 0 @@ -5611,7 +5611,7 @@ vst3_lane_s8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_s16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_s16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x3_t, %struct.int16x4x3_t* [[B]], i32 0, i32 0 @@ -5641,7 +5641,7 @@ vst3_lane_s16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_s32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_s32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int32x2x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x3_t, %struct.int32x2x3_t* [[B]], i32 0, i32 0 @@ -5671,7 +5671,7 @@ vst3_lane_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_s64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_s64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x3_t, %struct.int64x1x3_t* [[B]], i32 0, i32 0 @@ -5701,7 +5701,7 @@ vst3_lane_s64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_f16(half* noundef %a, [3 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_f16(half* noundef %a, [3 x <4 x half>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x3_t, %struct.float16x4x3_t* [[B]], i32 0, i32 0 @@ -5731,7 +5731,7 @@ vst3_lane_f16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_f32(float* noundef %a, [3 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_f32(float* noundef %a, [3 x <2 x float>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float32x2x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x3_t, %struct.float32x2x3_t* [[B]], i32 0, i32 0 @@ -5761,7 +5761,7 @@ vst3_lane_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void 
@test_vst3_lane_f64(double* noundef %a, [3 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_f64(double* noundef %a, [3 x <1 x double>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x3_t, %struct.float64x1x3_t* [[B]], i32 0, i32 0 @@ -5791,7 +5791,7 @@ vst3_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_p8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_p8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, %struct.poly8x8x3_t* [[B]], i32 0, i32 0 @@ -5814,7 +5814,7 @@ vst3_lane_p8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_p16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_p16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly16x4x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x3_t, %struct.poly16x4x3_t* [[B]], i32 0, i32 0 @@ -5844,7 +5844,7 @@ vst3_lane_p16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst3_lane_p64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_lane_p64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, %struct.poly64x1x3_t* [[B]], i32 0, i32 0 @@ -5874,7 +5874,7 @@ vst3_lane_p64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_u8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_u8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[B]], i32 0, i32 0 @@ -5900,7 +5900,7 @@ vst4q_lane_u8(a, b, 15); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_u16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_u16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[B]], i32 0, i32 0 @@ -5935,7 +5935,7 @@ vst4q_lane_u16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_u32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_u32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[B]], i32 0, i32 0 @@ -5970,7 +5970,7 @@ vst4q_lane_u32(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_u64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) 
#2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_u64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x4_t, %struct.uint64x2x4_t* [[B]], i32 0, i32 0 @@ -6005,7 +6005,7 @@ vst4q_lane_u64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_s8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_s8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[B]], i32 0, i32 0 @@ -6031,7 +6031,7 @@ vst4q_lane_s8(a, b, 15); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_s16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_s16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[B]], i32 0, i32 0 @@ -6066,7 +6066,7 @@ vst4q_lane_s16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_s32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_s32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[B]], i32 0, i32 0 @@ -6101,7 +6101,7 @@ vst4q_lane_s32(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_s64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_s64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x4_t, %struct.int64x2x4_t* [[B]], i32 0, i32 0 @@ -6136,7 +6136,7 @@ vst4q_lane_s64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_f16(half* noundef %a, [4 x <8 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_f16(half* noundef %a, [4 x <8 x half>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[B]], i32 0, i32 0 @@ -6171,7 +6171,7 @@ vst4q_lane_f16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_f32(float* noundef %a, [4 x <4 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_f32(float* noundef %a, [4 x <4 x float>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[B]], i32 0, i32 0 @@ -6206,7 +6206,7 @@ vst4q_lane_f32(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_f64(double* noundef %a, [4 x <2 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void 
@test_vst4q_lane_f64(double* noundef %a, [4 x <2 x double>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x4_t, %struct.float64x2x4_t* [[B]], i32 0, i32 0 @@ -6241,7 +6241,7 @@ vst4q_lane_f64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_p8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_p8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[B]], i32 0, i32 0 @@ -6267,7 +6267,7 @@ vst4q_lane_p8(a, b, 15); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_p16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_p16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[B]], i32 0, i32 0 @@ -6302,7 +6302,7 @@ vst4q_lane_p16(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_p64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_lane_p64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, %struct.poly64x2x4_t* [[B]], i32 0, i32 0 @@ -6337,7 +6337,7 @@ vst4q_lane_p64(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_u8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_u8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, %struct.uint8x8x4_t* [[B]], i32 0, i32 0 @@ -6363,7 +6363,7 @@ vst4_lane_u8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_u16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_u16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x4_t, %struct.uint16x4x4_t* [[B]], i32 0, i32 0 @@ -6398,7 +6398,7 @@ vst4_lane_u16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_u32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_u32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.uint32x2x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x4_t, %struct.uint32x2x4_t* [[B]], i32 0, i32 0 @@ -6433,7 +6433,7 @@ vst4_lane_u32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_u64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_u64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #0 { 
// CHECK: [[B:%.*]] = alloca %struct.uint64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x4_t, %struct.uint64x1x4_t* [[B]], i32 0, i32 0 @@ -6468,7 +6468,7 @@ vst4_lane_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_s8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_s8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x4_t, %struct.int8x8x4_t* [[B]], i32 0, i32 0 @@ -6494,7 +6494,7 @@ vst4_lane_s8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_s16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_s16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x4_t, %struct.int16x4x4_t* [[B]], i32 0, i32 0 @@ -6529,7 +6529,7 @@ vst4_lane_s16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_s32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_s32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int32x2x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x4_t, %struct.int32x2x4_t* [[B]], i32 0, i32 0 @@ -6564,7 +6564,7 @@ vst4_lane_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_s64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_s64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.int64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x4_t, %struct.int64x1x4_t* [[B]], i32 0, i32 0 @@ -6599,7 +6599,7 @@ vst4_lane_s64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_f16(half* noundef %a, [4 x <4 x half>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_f16(half* noundef %a, [4 x <4 x half>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x4_t, %struct.float16x4x4_t* [[B]], i32 0, i32 0 @@ -6634,7 +6634,7 @@ vst4_lane_f16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_f32(float* noundef %a, [4 x <2 x float>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_f32(float* noundef %a, [4 x <2 x float>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float32x2x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x4_t, %struct.float32x2x4_t* [[B]], i32 0, i32 0 @@ -6669,7 +6669,7 @@ vst4_lane_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_f64(double* noundef %a, [4 x <1 x double>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_f64(double* noundef %a, [4 x <1 x double>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: 
[[__S1:%.*]] = alloca %struct.float64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x4_t, %struct.float64x1x4_t* [[B]], i32 0, i32 0 @@ -6704,7 +6704,7 @@ vst4_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_p8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_p8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, %struct.poly8x8x4_t* [[B]], i32 0, i32 0 @@ -6730,7 +6730,7 @@ vst4_lane_p8(a, b, 7); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_p16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_p16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly16x4x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x4_t, %struct.poly16x4x4_t* [[B]], i32 0, i32 0 @@ -6765,7 +6765,7 @@ vst4_lane_p16(a, b, 3); } -// CHECK-LABEL: define{{.*}} void @test_vst4_lane_p64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_lane_p64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #0 { // CHECK: [[B:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, %struct.poly64x1x4_t* [[B]], i32 0, i32 0 @@ -6799,7 +6799,3 @@ void test_vst4_lane_p64(poly64_t *a, poly64x1x4_t b) { vst4_lane_p64(a, b, 0); } - -// CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="128" -// CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="64" -// CHECK: attributes #2 ={{.*}}"min-legal-vector-width"="0" diff --git a/clang/test/CodeGen/aarch64-neon-scalar-copy.c b/clang/test/CodeGen/aarch64-neon-scalar-copy.c --- a/clang/test/CodeGen/aarch64-neon-scalar-copy.c +++ b/clang/test/CodeGen/aarch64-neon-scalar-copy.c @@ -21,7 +21,7 @@ } -// CHECK-LABEL: define{{.*}} float @test_vdups_laneq_f32(<4 x float> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} float @test_vdups_laneq_f32(<4 x float> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %a, i32 3 // CHECK: ret float [[VGETQ_LANE]] float32_t test_vdups_laneq_f32(float32x4_t a) { @@ -29,7 +29,7 @@ } -// CHECK-LABEL: define{{.*}} double @test_vdupd_laneq_f64(<2 x double> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} double @test_vdupd_laneq_f64(<2 x double> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %a, i32 1 // CHECK: ret double [[VGETQ_LANE]] float64_t test_vdupd_laneq_f64(float64x2_t a) { @@ -100,7 +100,7 @@ return vdupd_lane_u64(a, 0); } -// CHECK-LABEL: define{{.*}} i8 @test_vdupb_laneq_s8(<16 x i8> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i8 @test_vdupb_laneq_s8(<16 x i8> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] int8_t test_vdupb_laneq_s8(int8x16_t a) { @@ -108,7 +108,7 @@ } -// CHECK-LABEL: define{{.*}} i16 @test_vduph_laneq_s16(<8 x i16> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i16 @test_vduph_laneq_s16(<8 x i16> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] int16_t test_vduph_laneq_s16(int16x8_t a) { @@ -116,7 +116,7 @@ 
} -// CHECK-LABEL: define{{.*}} i32 @test_vdups_laneq_s32(<4 x i32> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i32 @test_vdups_laneq_s32(<4 x i32> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3 // CHECK: ret i32 [[VGETQ_LANE]] int32_t test_vdups_laneq_s32(int32x4_t a) { @@ -124,7 +124,7 @@ } -// CHECK-LABEL: define{{.*}} i64 @test_vdupd_laneq_s64(<2 x i64> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i64 @test_vdupd_laneq_s64(<2 x i64> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1 // CHECK: ret i64 [[VGETQ_LANE]] int64_t test_vdupd_laneq_s64(int64x2_t a) { @@ -132,7 +132,7 @@ } -// CHECK-LABEL: define{{.*}} i8 @test_vdupb_laneq_u8(<16 x i8> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i8 @test_vdupb_laneq_u8(<16 x i8> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] uint8_t test_vdupb_laneq_u8(uint8x16_t a) { @@ -140,7 +140,7 @@ } -// CHECK-LABEL: define{{.*}} i16 @test_vduph_laneq_u16(<8 x i16> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i16 @test_vduph_laneq_u16(<8 x i16> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] uint16_t test_vduph_laneq_u16(uint16x8_t a) { @@ -148,7 +148,7 @@ } -// CHECK-LABEL: define{{.*}} i32 @test_vdups_laneq_u32(<4 x i32> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i32 @test_vdups_laneq_u32(<4 x i32> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3 // CHECK: ret i32 [[VGETQ_LANE]] uint32_t test_vdups_laneq_u32(uint32x4_t a) { @@ -156,7 +156,7 @@ } -// CHECK-LABEL: define{{.*}} i64 @test_vdupd_laneq_u64(<2 x i64> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i64 @test_vdupd_laneq_u64(<2 x i64> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1 // CHECK: ret i64 [[VGETQ_LANE]] uint64_t test_vdupd_laneq_u64(uint64x2_t a) { @@ -177,19 +177,16 @@ return vduph_lane_p16(a, 3); } -// CHECK-LABEL: define{{.*}} i8 @test_vdupb_laneq_p8(<16 x i8> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i8 @test_vdupb_laneq_p8(<16 x i8> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] poly8_t test_vdupb_laneq_p8(poly8x16_t a) { return vdupb_laneq_p8(a, 15); } -// CHECK-LABEL: define{{.*}} i16 @test_vduph_laneq_p16(<8 x i16> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i16 @test_vduph_laneq_p16(<8 x i16> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] poly16_t test_vduph_laneq_p16(poly16x8_t a) { return vduph_laneq_p16(a, 7); } - -// CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="64" -// CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="128" diff --git a/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c b/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c --- a/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c +++ b/clang/test/CodeGen/aarch64-neon-scalar-x-indexed-elem.c @@ -22,7 +22,7 @@ return vmuld_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} float @test_vmuls_laneq_f32(float noundef %a, <4 x float> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} float @test_vmuls_laneq_f32(float noundef %a, <4 x float> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %b, i32 3 // CHECK: [[MUL:%.*]] = fmul float %a, [[VGETQ_LANE]] // CHECK: ret float [[MUL]] @@ -30,7 +30,7 @@ return vmuls_laneq_f32(a, b, 3); } -// 
CHECK-LABEL: define{{.*}} double @test_vmuld_laneq_f64(double noundef %a, <2 x double> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} double @test_vmuld_laneq_f64(double noundef %a, <2 x double> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %b, i32 1 // CHECK: [[MUL:%.*]] = fmul double %a, [[VGETQ_LANE]] // CHECK: ret double [[MUL]] @@ -55,7 +55,7 @@ return vmulxs_lane_f32(a, b, 1); } -// CHECK-LABEL: define{{.*}} float @test_vmulxs_laneq_f32(float noundef %a, <4 x float> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} float @test_vmulxs_laneq_f32(float noundef %a, <4 x float> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %b, i32 3 // CHECK: [[VMULXS_F32_I:%.*]] = call float @llvm.aarch64.neon.fmulx.f32(float %a, float [[VGETQ_LANE]]) // CHECK: ret float [[VMULXS_F32_I]] @@ -71,7 +71,7 @@ return vmulxd_lane_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} double @test_vmulxd_laneq_f64(double noundef %a, <2 x double> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} double @test_vmulxd_laneq_f64(double noundef %a, <2 x double> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %b, i32 1 // CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double %a, double [[VGETQ_LANE]]) // CHECK: ret double [[VMULXD_F64_I]] @@ -90,7 +90,7 @@ } -// CHECK-LABEL: define{{.*}} <1 x double> @test_vmulx_laneq_f64_0(<1 x double> noundef %a, <2 x double> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <1 x double> @test_vmulx_laneq_f64_0(<1 x double> noundef %a, <2 x double> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x double> %a, i32 0 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %b, i32 0 // CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]]) @@ -100,7 +100,7 @@ return vmulx_laneq_f64(a, b, 0); } -// CHECK-LABEL: define{{.*}} <1 x double> @test_vmulx_laneq_f64_1(<1 x double> noundef %a, <2 x double> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <1 x double> @test_vmulx_laneq_f64_1(<1 x double> noundef %a, <2 x double> noundef %b) #0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x double> %a, i32 0 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x double> %b, i32 1 // CHECK: [[VMULXD_F64_I:%.*]] = call double @llvm.aarch64.neon.fmulx.f64(double [[VGET_LANE]], double [[VGETQ_LANE]]) @@ -127,7 +127,7 @@ return vfmad_lane_f64(a, b, c, 0); } -// CHECK-LABEL: define{{.*}} double @test_vfmad_laneq_f64(double noundef %a, double noundef %b, <2 x double> noundef %c) #1 { +// CHECK-LABEL: define{{.*}} double @test_vfmad_laneq_f64(double noundef %a, double noundef %b, <2 x double> noundef %c) #0 { // CHECK: [[EXTRACT:%.*]] = extractelement <2 x double> %c, i32 1 // CHECK: [[TMP2:%.*]] = call double @llvm.fma.f64(double %b, double [[EXTRACT]], double %a) // CHECK: ret double [[TMP2]] @@ -173,7 +173,7 @@ return vfms_lane_f64(a, b, v, 0); } -// CHECK-LABEL: define{{.*}} <1 x double> @test_vfma_laneq_f64(<1 x double> noundef %a, <1 x double> noundef %b, <2 x double> noundef %v) #1 { +// CHECK-LABEL: define{{.*}} <1 x double> @test_vfma_laneq_f64(<1 x double> noundef %a, <1 x double> noundef %b, <2 x double> noundef %v) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> // CHECK: [[TMP2:%.*]] = bitcast <2 x double> %v to <16 x i8> @@ -188,7 +188,7 @@ return vfma_laneq_f64(a, b, v, 0); } -// CHECK-LABEL: define{{.*}} <1 x double> @test_vfms_laneq_f64(<1 x 
double> noundef %a, <1 x double> noundef %b, <2 x double> noundef %v) #1 { +// CHECK-LABEL: define{{.*}} <1 x double> @test_vfms_laneq_f64(<1 x double> noundef %a, <1 x double> noundef %b, <2 x double> noundef %v) #0 { // CHECK: [[SUB:%.*]] = fneg <1 x double> %b // CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <1 x double> [[SUB]] to <8 x i8> @@ -223,7 +223,7 @@ return vqdmulls_lane_s32(a, b, 1); } -// CHECK-LABEL: define{{.*}} i32 @test_vqdmullh_laneq_s16(i16 noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} i32 @test_vqdmullh_laneq_s16(i16 noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %b, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGETQ_LANE]], i64 0 @@ -234,7 +234,7 @@ return vqdmullh_laneq_s16(a, b, 7); } -// CHECK-LABEL: define{{.*}} i64 @test_vqdmulls_laneq_s32(i32 noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} i64 @test_vqdmulls_laneq_s32(i32 noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %b, i32 3 // CHECK: [[VQDMULLS_S32_I:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %a, i32 [[VGETQ_LANE]]) // CHECK: ret i64 [[VQDMULLS_S32_I]] @@ -262,7 +262,7 @@ } -// CHECK-LABEL: define{{.*}} i16 @test_vqdmulhh_laneq_s16(i16 noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} i16 @test_vqdmulhh_laneq_s16(i16 noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %b, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGETQ_LANE]], i64 0 @@ -274,7 +274,7 @@ } -// CHECK-LABEL: define{{.*}} i32 @test_vqdmulhs_laneq_s32(i32 noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} i32 @test_vqdmulhs_laneq_s32(i32 noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %b, i32 3 // CHECK: [[VQDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqdmulh.i32(i32 %a, i32 [[VGETQ_LANE]]) // CHECK: ret i32 [[VQDMULHS_S32_I]] @@ -302,7 +302,7 @@ } -// CHECK-LABEL: define{{.*}} i16 @test_vqrdmulhh_laneq_s16(i16 noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} i16 @test_vqrdmulhh_laneq_s16(i16 noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %b, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> undef, i16 %a, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> undef, i16 [[VGETQ_LANE]], i64 0 @@ -314,7 +314,7 @@ } -// CHECK-LABEL: define{{.*}} i32 @test_vqrdmulhs_laneq_s32(i32 noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} i32 @test_vqrdmulhs_laneq_s32(i32 noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %b, i32 3 // CHECK: [[VQRDMULHS_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 %a, i32 [[VGETQ_LANE]]) // CHECK: ret i32 [[VQRDMULHS_S32_I]] @@ -343,7 +343,7 @@ return vqdmlals_lane_s32(a, b, c, 1); } -// CHECK-LABEL: define{{.*}} i32 @test_vqdmlalh_laneq_s16(i32 noundef %a, i16 noundef %b, <8 x i16> noundef %c) #1 { +// CHECK-LABEL: define{{.*}} i32 @test_vqdmlalh_laneq_s16(i32 noundef %a, i16 noundef %b, <8 x i16> noundef %c) #0 { // CHECK: [[LANE:%.*]] = extractelement <8 x i16> %c, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 // CHECK: [[TMP3:%.*]] = 
insertelement <4 x i16> poison, i16 [[LANE]], i64 0 @@ -355,7 +355,7 @@ return vqdmlalh_laneq_s16(a, b, c, 7); } -// CHECK-LABEL: define{{.*}} i64 @test_vqdmlals_laneq_s32(i64 noundef %a, i32 noundef %b, <4 x i32> noundef %c) #1 { +// CHECK-LABEL: define{{.*}} i64 @test_vqdmlals_laneq_s32(i64 noundef %a, i32 noundef %b, <4 x i32> noundef %c) #0 { // CHECK: [[LANE:%.*]] = extractelement <4 x i32> %c, i32 3 // CHECK: [[VQDMLXL:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %b, i32 [[LANE]]) // CHECK: [[VQDMLXL1:%.*]] = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %a, i64 [[VQDMLXL]]) @@ -385,7 +385,7 @@ return vqdmlsls_lane_s32(a, b, c, 1); } -// CHECK-LABEL: define{{.*}} i32 @test_vqdmlslh_laneq_s16(i32 noundef %a, i16 noundef %b, <8 x i16> noundef %c) #1 { +// CHECK-LABEL: define{{.*}} i32 @test_vqdmlslh_laneq_s16(i32 noundef %a, i16 noundef %b, <8 x i16> noundef %c) #0 { // CHECK: [[LANE:%.*]] = extractelement <8 x i16> %c, i32 7 // CHECK: [[TMP2:%.*]] = insertelement <4 x i16> poison, i16 %b, i64 0 // CHECK: [[TMP3:%.*]] = insertelement <4 x i16> poison, i16 [[LANE]], i64 0 @@ -397,7 +397,7 @@ return vqdmlslh_laneq_s16(a, b, c, 7); } -// CHECK-LABEL: define{{.*}} i64 @test_vqdmlsls_laneq_s32(i64 noundef %a, i32 noundef %b, <4 x i32> noundef %c) #1 { +// CHECK-LABEL: define{{.*}} i64 @test_vqdmlsls_laneq_s32(i64 noundef %a, i32 noundef %b, <4 x i32> noundef %c) #0 { // CHECK: [[LANE:%.*]] = extractelement <4 x i32> %c, i32 3 // CHECK: [[VQDMLXL:%.*]] = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %b, i32 [[LANE]]) // CHECK: [[VQDMLXL1:%.*]] = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %a, i64 [[VQDMLXL]]) @@ -425,7 +425,7 @@ return result; } -// CHECK-LABEL: define{{.*}} <1 x double> @test_vmulx_laneq_f64_2() #1 { +// CHECK-LABEL: define{{.*}} <1 x double> @test_vmulx_laneq_f64_2() #0 { // CHECK: [[TMP0:%.*]] = bitcast i64 4599917171378402754 to <1 x double> // CHECK: [[TMP1:%.*]] = bitcast i64 4606655882138939123 to <1 x double> // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <1 x double> [[TMP0]], <1 x double> [[TMP1]], <2 x i32> @@ -446,6 +446,3 @@ result = vmulx_laneq_f64(arg1, arg3, 1); return result; } - -// CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="64" -// CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="128" diff --git a/clang/test/CodeGen/aarch64-neon-vget.c b/clang/test/CodeGen/aarch64-neon-vget.c --- a/clang/test/CodeGen/aarch64-neon-vget.c +++ b/clang/test/CodeGen/aarch64-neon-vget.c @@ -83,70 +83,70 @@ return vget_lane_f16(a, 1); } -// CHECK-LABEL: define{{.*}} i8 @test_vgetq_lane_u8(<16 x i8> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i8 @test_vgetq_lane_u8(<16 x i8> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] uint8_t test_vgetq_lane_u8(uint8x16_t a) { return vgetq_lane_u8(a, 15); } -// CHECK-LABEL: define{{.*}} i16 @test_vgetq_lane_u16(<8 x i16> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i16 @test_vgetq_lane_u16(<8 x i16> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] uint16_t test_vgetq_lane_u16(uint16x8_t a) { return vgetq_lane_u16(a, 7); } -// CHECK-LABEL: define{{.*}} i32 @test_vgetq_lane_u32(<4 x i32> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i32 @test_vgetq_lane_u32(<4 x i32> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3 // CHECK: ret i32 [[VGETQ_LANE]] uint32_t test_vgetq_lane_u32(uint32x4_t a) { return vgetq_lane_u32(a, 3); } -// CHECK-LABEL: define{{.*}} i8 
@test_vgetq_lane_s8(<16 x i8> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i8 @test_vgetq_lane_s8(<16 x i8> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] int8_t test_vgetq_lane_s8(int8x16_t a) { return vgetq_lane_s8(a, 15); } -// CHECK-LABEL: define{{.*}} i16 @test_vgetq_lane_s16(<8 x i16> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i16 @test_vgetq_lane_s16(<8 x i16> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] int16_t test_vgetq_lane_s16(int16x8_t a) { return vgetq_lane_s16(a, 7); } -// CHECK-LABEL: define{{.*}} i32 @test_vgetq_lane_s32(<4 x i32> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i32 @test_vgetq_lane_s32(<4 x i32> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3 // CHECK: ret i32 [[VGETQ_LANE]] int32_t test_vgetq_lane_s32(int32x4_t a) { return vgetq_lane_s32(a, 3); } -// CHECK-LABEL: define{{.*}} i8 @test_vgetq_lane_p8(<16 x i8> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i8 @test_vgetq_lane_p8(<16 x i8> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15 // CHECK: ret i8 [[VGETQ_LANE]] poly8_t test_vgetq_lane_p8(poly8x16_t a) { return vgetq_lane_p8(a, 15); } -// CHECK-LABEL: define{{.*}} i16 @test_vgetq_lane_p16(<8 x i16> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i16 @test_vgetq_lane_p16(<8 x i16> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7 // CHECK: ret i16 [[VGETQ_LANE]] poly16_t test_vgetq_lane_p16(poly16x8_t a) { return vgetq_lane_p16(a, 7); } -// CHECK-LABEL: define{{.*}} float @test_vgetq_lane_f32(<4 x float> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} float @test_vgetq_lane_f32(<4 x float> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %a, i32 3 // CHECK: ret float [[VGETQ_LANE]] float32_t test_vgetq_lane_f32(float32x4_t a) { return vgetq_lane_f32(a, 3); } -// CHECK-LABEL: define{{.*}} float @test_vgetq_lane_f16(<8 x half> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} float @test_vgetq_lane_f16(<8 x half> noundef %a) #0 { // CHECK: [[__REINT_244:%.*]] = alloca <8 x half>, align 16 // CHECK: [[__REINT1_244:%.*]] = alloca i16, align 2 // CHECK: store <8 x half> %a, ptr [[__REINT_244]], align 16 @@ -174,14 +174,14 @@ return vget_lane_u64(a, 0); } -// CHECK-LABEL: define{{.*}} i64 @test_vgetq_lane_s64(<2 x i64> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i64 @test_vgetq_lane_s64(<2 x i64> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1 // CHECK: ret i64 [[VGETQ_LANE]] int64_t test_vgetq_lane_s64(int64x2_t a) { return vgetq_lane_s64(a, 1); } -// CHECK-LABEL: define{{.*}} i64 @test_vgetq_lane_u64(<2 x i64> noundef %a) #1 { +// CHECK-LABEL: define{{.*}} i64 @test_vgetq_lane_u64(<2 x i64> noundef %a) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1 // CHECK: ret i64 [[VGETQ_LANE]] uint64_t test_vgetq_lane_u64(uint64x2_t a) { @@ -269,70 +269,70 @@ return vset_lane_f16(*a, b, 3); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vsetq_lane_u8(i8 noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vsetq_lane_u8(i8 noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %b, i8 %a, i32 15 // CHECK: ret <16 x i8> [[VSET_LANE]] uint8x16_t test_vsetq_lane_u8(uint8_t a, uint8x16_t b) { return vsetq_lane_u8(a, b, 15); } -// CHECK-LABEL: define{{.*}} <8 x i16> 
@test_vsetq_lane_u16(i16 noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vsetq_lane_u16(i16 noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %b, i16 %a, i32 7 // CHECK: ret <8 x i16> [[VSET_LANE]] uint16x8_t test_vsetq_lane_u16(uint16_t a, uint16x8_t b) { return vsetq_lane_u16(a, b, 7); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vsetq_lane_u32(i32 noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vsetq_lane_u32(i32 noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i32> %b, i32 %a, i32 3 // CHECK: ret <4 x i32> [[VSET_LANE]] uint32x4_t test_vsetq_lane_u32(uint32_t a, uint32x4_t b) { return vsetq_lane_u32(a, b, 3); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vsetq_lane_s8(i8 noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vsetq_lane_s8(i8 noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %b, i8 %a, i32 15 // CHECK: ret <16 x i8> [[VSET_LANE]] int8x16_t test_vsetq_lane_s8(int8_t a, int8x16_t b) { return vsetq_lane_s8(a, b, 15); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vsetq_lane_s16(i16 noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vsetq_lane_s16(i16 noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %b, i16 %a, i32 7 // CHECK: ret <8 x i16> [[VSET_LANE]] int16x8_t test_vsetq_lane_s16(int16_t a, int16x8_t b) { return vsetq_lane_s16(a, b, 7); } -// CHECK-LABEL: define{{.*}} <4 x i32> @test_vsetq_lane_s32(i32 noundef %a, <4 x i32> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x i32> @test_vsetq_lane_s32(i32 noundef %a, <4 x i32> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i32> %b, i32 %a, i32 3 // CHECK: ret <4 x i32> [[VSET_LANE]] int32x4_t test_vsetq_lane_s32(int32_t a, int32x4_t b) { return vsetq_lane_s32(a, b, 3); } -// CHECK-LABEL: define{{.*}} <16 x i8> @test_vsetq_lane_p8(i8 noundef %a, <16 x i8> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <16 x i8> @test_vsetq_lane_p8(i8 noundef %a, <16 x i8> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %b, i8 %a, i32 15 // CHECK: ret <16 x i8> [[VSET_LANE]] poly8x16_t test_vsetq_lane_p8(poly8_t a, poly8x16_t b) { return vsetq_lane_p8(a, b, 15); } -// CHECK-LABEL: define{{.*}} <8 x i16> @test_vsetq_lane_p16(i16 noundef %a, <8 x i16> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x i16> @test_vsetq_lane_p16(i16 noundef %a, <8 x i16> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %b, i16 %a, i32 7 // CHECK: ret <8 x i16> [[VSET_LANE]] poly16x8_t test_vsetq_lane_p16(poly16_t a, poly16x8_t b) { return vsetq_lane_p16(a, b, 7); } -// CHECK-LABEL: define{{.*}} <4 x float> @test_vsetq_lane_f32(float noundef %a, <4 x float> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <4 x float> @test_vsetq_lane_f32(float noundef %a, <4 x float> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x float> %b, float %a, i32 3 // CHECK: ret <4 x float> [[VSET_LANE]] float32x4_t test_vsetq_lane_f32(float32_t a, float32x4_t b) { return vsetq_lane_f32(a, b, 3); } -// CHECK-LABEL: define{{.*}} <8 x half> @test_vsetq_lane_f16(ptr noundef %a, <8 x half> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <8 x half> @test_vsetq_lane_f16(ptr noundef %a, <8 x half> noundef %b) #0 { // CHECK: [[__REINT_248:%.*]] = alloca half, align 2 // CHECK: [[__REINT1_248:%.*]] = alloca 
<8 x half>, align 16 // CHECK: [[__REINT2_248:%.*]] = alloca <8 x i16>, align 16 @@ -363,19 +363,16 @@ return vset_lane_u64(a, b, 0); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vsetq_lane_s64(i64 noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vsetq_lane_s64(i64 noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %b, i64 %a, i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] int64x2_t test_vsetq_lane_s64(int64_t a, int64x2_t b) { return vsetq_lane_s64(a, b, 1); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vsetq_lane_u64(i64 noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vsetq_lane_u64(i64 noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %b, i64 %a, i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] uint64x2_t test_vsetq_lane_u64(uint64_t a, uint64x2_t b) { return vsetq_lane_u64(a, b, 1); } - -// CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="64" -// CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="128" diff --git a/clang/test/CodeGen/aarch64-poly64.c b/clang/test/CodeGen/aarch64-poly64.c --- a/clang/test/CodeGen/aarch64-poly64.c +++ b/clang/test/CodeGen/aarch64-poly64.c @@ -14,7 +14,7 @@ return vceq_p64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vceqq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vceqq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[CMP_I:%.*]] = icmp eq <2 x i64> %a, %b // CHECK: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64> // CHECK: ret <2 x i64> [[SEXT_I]] @@ -31,7 +31,7 @@ return vtst_p64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vtstq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vtstq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP4:%.*]] = and <2 x i64> %a, %b // CHECK: [[TMP5:%.*]] = icmp ne <2 x i64> [[TMP4]], zeroinitializer // CHECK: [[VTST_I:%.*]] = sext <2 x i1> [[TMP5]] to <2 x i64> @@ -50,7 +50,7 @@ return vbsl_p64(a, b, c); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vbslq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b, <2 x i64> noundef %c) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vbslq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b, <2 x i64> noundef %c) #0 { // CHECK: [[VBSL3_I:%.*]] = and <2 x i64> %a, %b // CHECK: [[TMP3:%.*]] = xor <2 x i64> %a, // CHECK: [[VBSL4_I:%.*]] = and <2 x i64> [[TMP3]], %c @@ -67,7 +67,7 @@ return vget_lane_p64(v, 0); } -// CHECK-LABEL: define{{.*}} i64 @test_vgetq_lane_p64(<2 x i64> noundef %v) #1 { +// CHECK-LABEL: define{{.*}} i64 @test_vgetq_lane_p64(<2 x i64> noundef %v) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %v, i32 1 // CHECK: ret i64 [[VGETQ_LANE]] poly64_t test_vgetq_lane_p64(poly64x2_t v) { @@ -81,7 +81,7 @@ return vset_lane_p64(a, v, 0); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vsetq_lane_p64(i64 noundef %a, <2 x i64> noundef %v) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vsetq_lane_p64(i64 noundef %a, <2 x i64> noundef %v) #0 { // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %v, i64 %a, i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] poly64x2_t test_vsetq_lane_p64(poly64_t a, poly64x2_t v) { @@ -97,7 +97,7 @@ } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vcopyq_lane_p64(<2 x i64> noundef %a, <1 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vcopyq_lane_p64(<2 x i64> noundef %a, <1 x i64> noundef %b) 
#0 { // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %b, i32 0 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %a, i64 [[VGET_LANE]], i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] @@ -105,7 +105,7 @@ return vcopyq_lane_p64(a, 1, b, 0); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vcopyq_laneq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vcopyq_laneq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %b, i32 1 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %a, i64 [[VGETQ_LANE]], i32 1 // CHECK: ret <2 x i64> [[VSET_LANE]] @@ -126,7 +126,7 @@ poly64x1_t test_vdup_n_p64(poly64_t a) { return vdup_n_p64(a); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vdupq_n_p64(i64 noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vdupq_n_p64(i64 noundef %a) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x i64> undef, i64 %a, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x i64> [[VECINIT_I]], i64 %a, i32 1 // CHECK: ret <2 x i64> [[VECINIT1_I]] @@ -141,7 +141,7 @@ return vmov_n_p64(a); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vmovq_n_p64(i64 noundef %a) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vmovq_n_p64(i64 noundef %a) #0 { // CHECK: [[VECINIT_I:%.*]] = insertelement <2 x i64> undef, i64 %a, i32 0 // CHECK: [[VECINIT1_I:%.*]] = insertelement <2 x i64> [[VECINIT_I]], i64 %a, i32 1 // CHECK: ret <2 x i64> [[VECINIT1_I]] @@ -158,7 +158,7 @@ return vdup_lane_p64(vec, 0); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vdupq_lane_p64(<1 x i64> noundef %vec) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vdupq_lane_p64(<1 x i64> noundef %vec) #0 { // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> [[VEC:%.*]] to <8 x i8> // CHECK: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> // CHECK: [[LANE:%.*]] = shufflevector <1 x i64> [[TMP1]], <1 x i64> [[TMP1]], <2 x i32> zeroinitializer @@ -167,7 +167,7 @@ return vdupq_lane_p64(vec, 0); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vdupq_laneq_p64(<2 x i64> noundef %vec) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vdupq_laneq_p64(<2 x i64> noundef %vec) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> [[VEC:%.*]] to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> // CHECK: [[LANE:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> [[TMP1]], <2 x i32> @@ -176,7 +176,7 @@ return vdupq_laneq_p64(vec, 1); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vcombine_p64(<1 x i64> noundef %low, <1 x i64> noundef %high) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vcombine_p64(<1 x i64> noundef %low, <1 x i64> noundef %high) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <1 x i64> %low, <1 x i64> %high, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vcombine_p64(poly64x1_t low, poly64x1_t high) { @@ -190,7 +190,7 @@ return vld1_p64(ptr); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vld1q_p64(ptr noundef %ptr) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vld1q_p64(ptr noundef %ptr) #0 { // CHECK: [[TMP2:%.*]] = load <2 x i64>, ptr %ptr // CHECK: ret <2 x i64> [[TMP2]] poly64x2_t test_vld1q_p64(poly64_t const * ptr) { @@ -206,7 +206,7 @@ return vst1_p64(ptr, val); } -// CHECK-LABEL: define{{.*}} void @test_vst1q_p64(ptr noundef %ptr, <2 x i64> noundef %val) #1 { +// CHECK-LABEL: define{{.*}} void @test_vst1q_p64(ptr noundef %ptr, <2 x i64> noundef %val) #0 { // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %val to <16 x i8> // CHECK: 
[[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> // CHECK: store <2 x i64> [[TMP3]], ptr %ptr @@ -215,7 +215,7 @@ return vst1q_p64(ptr, val); } -// CHECK-LABEL: define{{.*}} %struct.poly64x1x2_t @test_vld2_p64(ptr noundef %ptr) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x1x2_t @test_vld2_p64(ptr noundef %ptr) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[VLD2:%.*]] = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0(ptr %ptr) @@ -227,7 +227,7 @@ return vld2_p64(ptr); } -// CHECK-LABEL: define{{.*}} %struct.poly64x2x2_t @test_vld2q_p64(ptr noundef %ptr) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x2x2_t @test_vld2q_p64(ptr noundef %ptr) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[VLD2:%.*]] = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0(ptr %ptr) @@ -239,7 +239,7 @@ return vld2q_p64(ptr); } -// CHECK-LABEL: define{{.*}} %struct.poly64x1x3_t @test_vld3_p64(ptr noundef %ptr) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x1x3_t @test_vld3_p64(ptr noundef %ptr) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[VLD3:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0(ptr %ptr) @@ -251,7 +251,7 @@ return vld3_p64(ptr); } -// CHECK-LABEL: define{{.*}} %struct.poly64x2x3_t @test_vld3q_p64(ptr noundef %ptr) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x2x3_t @test_vld3q_p64(ptr noundef %ptr) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[VLD3:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0(ptr %ptr) @@ -263,7 +263,7 @@ return vld3q_p64(ptr); } -// CHECK-LABEL: define{{.*}} %struct.poly64x1x4_t @test_vld4_p64(ptr noundef %ptr) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x1x4_t @test_vld4_p64(ptr noundef %ptr) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[VLD4:%.*]] = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0(ptr %ptr) @@ -275,7 +275,7 @@ return vld4_p64(ptr); } -// CHECK-LABEL: define{{.*}} %struct.poly64x2x4_t @test_vld4q_p64(ptr noundef %ptr) #2 { +// CHECK-LABEL: define{{.*}} %struct.poly64x2x4_t @test_vld4q_p64(ptr noundef %ptr) #0 { // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[VLD4:%.*]] = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0(ptr %ptr) @@ -287,7 +287,7 @@ return vld4q_p64(ptr); } -// CHECK-LABEL: define{{.*}} void @test_vst2_p64(ptr noundef %ptr, [2 x <1 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2_p64(ptr noundef %ptr, [2 x <1 x i64>] %val.coerce) #0 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x2_t, ptr [[VAL]], i32 0, i32 0 @@ -309,7 +309,7 @@ return vst2_p64(ptr, val); } -// CHECK-LABEL: define{{.*}} void @test_vst2q_p64(ptr noundef %ptr, [2 x <2 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst2q_p64(ptr noundef 
%ptr, [2 x <2 x i64>] %val.coerce) #0 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x2_t, ptr [[VAL]], i32 0, i32 0 @@ -331,7 +331,7 @@ return vst2q_p64(ptr, val); } -// CHECK-LABEL: define{{.*}} void @test_vst3_p64(ptr noundef %ptr, [3 x <1 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3_p64(ptr noundef %ptr, [3 x <1 x i64>] %val.coerce) #0 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, ptr [[VAL]], i32 0, i32 0 @@ -358,7 +358,7 @@ return vst3_p64(ptr, val); } -// CHECK-LABEL: define{{.*}} void @test_vst3q_p64(ptr noundef %ptr, [3 x <2 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst3q_p64(ptr noundef %ptr, [3 x <2 x i64>] %val.coerce) #0 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, ptr [[VAL]], i32 0, i32 0 @@ -385,7 +385,7 @@ return vst3q_p64(ptr, val); } -// CHECK-LABEL: define{{.*}} void @test_vst4_p64(ptr noundef %ptr, [4 x <1 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4_p64(ptr noundef %ptr, [4 x <1 x i64>] %val.coerce) #0 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, ptr [[VAL]], i32 0, i32 0 @@ -417,7 +417,7 @@ return vst4_p64(ptr, val); } -// CHECK-LABEL: define{{.*}} void @test_vst4q_p64(ptr noundef %ptr, [4 x <2 x i64>] %val.coerce) #2 { +// CHECK-LABEL: define{{.*}} void @test_vst4q_p64(ptr noundef %ptr, [4 x <2 x i64>] %val.coerce) #0 { // CHECK: [[VAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, ptr [[VAL]], i32 0, i32 0 @@ -461,7 +461,7 @@ } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vextq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vextq_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> @@ -472,42 +472,42 @@ return vextq_p64(a, b, 1); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vzip1q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vzip1q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vzip1q_p64(poly64x2_t a, poly64x2_t b) { return vzip1q_p64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vzip2q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vzip2q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vzip2q_p64(poly64x2_t a, poly64x2_t b) { return vzip2q_u64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vuzp1q_p64(<2 x 
i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vuzp1q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vuzp1q_p64(poly64x2_t a, poly64x2_t b) { return vuzp1q_p64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vuzp2q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vuzp2q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vuzp2q_p64(poly64x2_t a, poly64x2_t b) { return vuzp2q_u64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vtrn1q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vtrn1q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vtrn1q_p64(poly64x2_t a, poly64x2_t b) { return vtrn1q_p64(a, b); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vtrn2q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vtrn2q_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[SHUFFLE_I:%.*]] = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> // CHECK: ret <2 x i64> [[SHUFFLE_I]] poly64x2_t test_vtrn2q_p64(poly64x2_t a, poly64x2_t b) { @@ -525,7 +525,7 @@ return vsri_n_p64(a, b, 33); } -// CHECK-LABEL: define{{.*}} <2 x i64> @test_vsriq_n_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #1 { +// CHECK-LABEL: define{{.*}} <2 x i64> @test_vsriq_n_p64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 { // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8> // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> // CHECK: [[VSRI_N:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64> @@ -535,7 +535,3 @@ poly64x2_t test_vsriq_n_p64(poly64x2_t a, poly64x2_t b) { return vsriq_n_p64(a, b, 64); } - -// CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="64" -// CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="128" -// CHECK: attributes #2 ={{.*}}"min-legal-vector-width"="0" diff --git a/clang/test/CodeGen/arm64_vdupq_n_f64.c b/clang/test/CodeGen/arm64_vdupq_n_f64.c --- a/clang/test/CodeGen/arm64_vdupq_n_f64.c +++ b/clang/test/CodeGen/arm64_vdupq_n_f64.c @@ -48,7 +48,7 @@ return vmovq_n_f64(w); } -// CHECK-LABEL: define{{.*}} <4 x half> @test_vmov_n_f16(ptr noundef %a1) #1 { +// CHECK-LABEL: define{{.*}} <4 x half> @test_vmov_n_f16(ptr noundef %a1) #0 { // CHECK: [[TMP0:%.*]] = load half, ptr %a1, align 2 // CHECK: [[VECINIT:%.*]] = insertelement <4 x half> undef, half [[TMP0]], i32 0 // CHECK: [[VECINIT1:%.*]] = insertelement <4 x half> [[VECINIT]], half [[TMP0]], i32 1 @@ -79,6 +79,3 @@ float16x8_t test_vmovq_n_f16(float16_t *a1) { return vmovq_n_f16(*a1); } - -// CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="128" -// CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="64" diff --git a/clang/test/CodeGenCXX/arm-generated-fn-attr.cpp b/clang/test/CodeGenCXX/arm-generated-fn-attr.cpp --- a/clang/test/CodeGenCXX/arm-generated-fn-attr.cpp +++ b/clang/test/CodeGenCXX/arm-generated-fn-attr.cpp @@ -23,14 +23,12 @@ // CHECK: define {{.*}} @__cxx_global_var_init() [[ATTR1:#[0-9]+]] // CHECK: define {{.*}} @__clang_call_terminate({{.*}}) [[ATTR2:#[0-9]+]] -// CHECK: define {{.*}} 
@_ZTW4var1() [[ATTR3:#[0-9]+]] -// CHECK: define {{.*}} @_ZTW4var2() [[ATTR3]] +// CHECK: define {{.*}} @_ZTW4var1() [[ATTR1]] +// CHECK: define {{.*}} @_ZTW4var2() [[ATTR1]] // CHECK: define {{.*}} @__tls_init() [[ATTR1]] // CHECK-PACBTI: attributes [[ATTR1]] = { {{.*}}"target-features"="+armv8.1-m.main,+pacbti,+thumb-mode"{{.*}} } // CHECK-PACBTI: attributes [[ATTR2]] = { {{.*}}"target-features"="+armv8.1-m.main,+pacbti,+thumb-mode"{{.*}} } -// CHECK-PACBTI: attributes [[ATTR3]] = { {{.*}}"target-features"="+armv8.1-m.main,+pacbti,+thumb-mode"{{.*}} } // CHECK-NOPACBTI: attributes [[ATTR1]] = { {{.*}}"target-features"="+armv8.1-m.main,+thumb-mode,-pacbti"{{.*}} } // CHECK-NOPACBTI: attributes [[ATTR2]] = { {{.*}}"target-features"="+armv8.1-m.main,+thumb-mode,-pacbti"{{.*}} } -// CHECK-NOPACBTI: attributes [[ATTR3]] = { {{.*}}"target-features"="+armv8.1-m.main,+thumb-mode,-pacbti"{{.*}} } diff --git a/clang/test/OpenMP/amdgcn-attributes.cpp b/clang/test/OpenMP/amdgcn-attributes.cpp --- a/clang/test/OpenMP/amdgcn-attributes.cpp +++ b/clang/test/OpenMP/amdgcn-attributes.cpp @@ -32,12 +32,12 @@ return x + 1; } -// DEFAULT: attributes #0 = { convergent noinline norecurse nounwind optnone "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "uniform-work-group-size"="true" } -// CPU: attributes #0 = { convergent noinline norecurse nounwind optnone "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx900" "target-features"="+16-bit-insts,+ci-insts,+dpp,+flat-address-space,+gfx8-insts,+gfx9-insts,+s-memrealtime,+s-memtime-inst" "uniform-work-group-size"="true" } -// NOIEEE: attributes #0 = { convergent noinline norecurse nounwind optnone "amdgpu-ieee"="false" "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-nans-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "uniform-work-group-size"="true" } -// UNSAFEATOMIC: attributes #0 = { convergent noinline norecurse nounwind optnone "amdgpu-unsafe-fp-atomics"="true" "frame-pointer"="none" "kernel" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "uniform-work-group-size"="true" } - -// DEFAULT: attributes #1 = { convergent mustprogress noinline nounwind optnone "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" } -// CPU: attributes #1 = { convergent mustprogress noinline nounwind optnone "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx900" "target-features"="+16-bit-insts,+ci-insts,+dpp,+flat-address-space,+gfx8-insts,+gfx9-insts,+s-memrealtime,+s-memtime-inst" } -// NOIEEE: attributes #1 = { convergent mustprogress noinline nounwind optnone "amdgpu-ieee"="false" "frame-pointer"="none" "min-legal-vector-width"="0" "no-nans-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" } -// UNSAFEATOMIC: attributes #1 = { convergent mustprogress noinline nounwind optnone "amdgpu-unsafe-fp-atomics"="true" "frame-pointer"="none" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" } +// DEFAULT: attributes #0 = { convergent noinline norecurse nounwind optnone "frame-pointer"="none" "kernel" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "uniform-work-group-size"="true" } +// CPU: attributes #0 = { convergent noinline 
norecurse nounwind optnone "frame-pointer"="none" "kernel" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx900" "target-features"="+16-bit-insts,+ci-insts,+dpp,+flat-address-space,+gfx8-insts,+gfx9-insts,+s-memrealtime,+s-memtime-inst" "uniform-work-group-size"="true" } +// NOIEEE: attributes #0 = { convergent noinline norecurse nounwind optnone "amdgpu-ieee"="false" "frame-pointer"="none" "kernel" "no-nans-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "uniform-work-group-size"="true" } +// UNSAFEATOMIC: attributes #0 = { convergent noinline norecurse nounwind optnone "amdgpu-unsafe-fp-atomics"="true" "frame-pointer"="none" "kernel" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "uniform-work-group-size"="true" } + +// DEFAULT: attributes #1 = { convergent mustprogress noinline nounwind optnone "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" } +// CPU: attributes #1 = { convergent mustprogress noinline nounwind optnone "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="gfx900" "target-features"="+16-bit-insts,+ci-insts,+dpp,+flat-address-space,+gfx8-insts,+gfx9-insts,+s-memrealtime,+s-memtime-inst" } +// NOIEEE: attributes #1 = { convergent mustprogress noinline nounwind optnone "amdgpu-ieee"="false" "frame-pointer"="none" "no-nans-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" } +// UNSAFEATOMIC: attributes #1 = { convergent mustprogress noinline nounwind optnone "amdgpu-unsafe-fp-atomics"="true" "frame-pointer"="none" "no-trapping-math"="true" "stack-protector-buffer-size"="8" } diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -2232,14 +2232,6 @@ unbounded. If the optional max value is omitted then max is set to the value of min. If the attribute is not present, no assumptions are made about the range of vscale. -``"min-legal-vector-width"=""`` - This attribute indicates the minimum legal vector width required by the - calling convension. It is the maximum width of vector arguments and - returnings in the function and functions called by this function. Because - all the vectors are supposed to be legal type for compatibility. - Backends are free to ignore the attribute if they don't need to support - different maximum legal vector types or such information can be inferred by - other attributes. Call Site Attributes ----------------------
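
Editor's note, not part of the patch: the test updates above stop expecting the "min-legal-vector-width" attribute on AArch64 and AMDGPU output, while the removed LangRef paragraph describes the attribute as tracking the maximum width of vector arguments and returns. The sketch below shows the kind of regression test one might use to pin down that split; it is a hedged illustration only — the file name, RUN lines, function name, and the exact attribute value "128" are assumptions and do not come from this patch.

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s --check-prefix=X86
// RUN: %clang_cc1 -triple aarch64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s --check-prefix=AARCH64

// A 128-bit vector passed and returned by value is the widest vector type in
// this translation unit, so it is what the width bookkeeping would record.
typedef int v4si __attribute__((vector_size(16)));

v4si add_v4si(v4si a, v4si b) { return a + b; }

// On x86 the attribute is still expected, carrying the largest vector width
// seen in the signature; on AArch64 it should no longer appear at all.
// X86: attributes #0 ={{.*}}"min-legal-vector-width"="128"
// AARCH64: define{{.*}} @add_v4si(
// AARCH64-NOT: "min-legal-vector-width"

The design point the sketch tries to capture is the same one the test churn above reflects: only targets that actually consume the attribute need to keep checking for it, so non-X86 tests can drop the attribute-group suffix distinctions (#1/#2 versus #0) entirely.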