clang/test/CodeGen/aarch64-neon-ldst-one.c
Context not available.
#include <arm_neon.h>
// CHECK-LABEL: define <16 x i8> @test_vld1q_dup_u8(i8* %a) #0 { | // CHECK-LABEL: define noundef <16 x i8> @test_vld1q_dup_u8(i8* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[TMP1:%.*]] = insertelement <16 x i8> undef, i8 [[TMP0]], i32 0
// CHECK: [[LANE:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP1]], <16 x i32> zeroinitializer
Context not available.
return vld1q_dup_u8(a);
}
// CHECK-LABEL: define <8 x i16> @test_vld1q_dup_u16(i16* %a) #0 { | // CHECK-LABEL: define noundef <8 x i16> @test_vld1q_dup_u16(i16* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]]
Context not available.
return vld1q_dup_u16(a);
}
// CHECK-LABEL: define <4 x i32> @test_vld1q_dup_u32(i32* %a) #0 { | // CHECK-LABEL: define noundef <4 x i32> @test_vld1q_dup_u32(i32* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32*
// CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]]
Context not available.
return vld1q_dup_u32(a);
}
// CHECK-LABEL: define <2 x i64> @test_vld1q_dup_u64(i64* %a) #0 { | // CHECK-LABEL: define noundef <2 x i64> @test_vld1q_dup_u64(i64* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64*
// CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]]
Context not available.
return vld1q_dup_u64(a);
}
// CHECK-LABEL: define <16 x i8> @test_vld1q_dup_s8(i8* %a) #0 { | // CHECK-LABEL: define noundef <16 x i8> @test_vld1q_dup_s8(i8* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[TMP1:%.*]] = insertelement <16 x i8> undef, i8 [[TMP0]], i32 0
// CHECK: [[LANE:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP1]], <16 x i32> zeroinitializer
Context not available.
return vld1q_dup_s8(a);
}
// CHECK-LABEL: define <8 x i16> @test_vld1q_dup_s16(i16* %a) #0 { | // CHECK-LABEL: define noundef <8 x i16> @test_vld1q_dup_s16(i16* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]]
Context not available.
return vld1q_dup_s16(a);
}
// CHECK-LABEL: define <4 x i32> @test_vld1q_dup_s32(i32* %a) #0 { | // CHECK-LABEL: define noundef <4 x i32> @test_vld1q_dup_s32(i32* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32*
// CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]]
Context not available.
return vld1q_dup_s32(a);
}
// CHECK-LABEL: define <2 x i64> @test_vld1q_dup_s64(i64* %a) #0 { | // CHECK-LABEL: define noundef <2 x i64> @test_vld1q_dup_s64(i64* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64*
// CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]]
Context not available.
return vld1q_dup_s64(a);
}
// CHECK-LABEL: define <8 x half> @test_vld1q_dup_f16(half* %a) #0 { | // CHECK-LABEL: define noundef <8 x half> @test_vld1q_dup_f16(half* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half*
// CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]]
Context not available.
return vld1q_dup_f16(a);
}
// CHECK-LABEL: define <4 x float> @test_vld1q_dup_f32(float* %a) #0 { | // CHECK-LABEL: define noundef <4 x float> @test_vld1q_dup_f32(float* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast float* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to float*
// CHECK: [[TMP2:%.*]] = load float, float* [[TMP1]]
Context not available.
return vld1q_dup_f32(a);
}
// CHECK-LABEL: define <2 x double> @test_vld1q_dup_f64(double* %a) #0 { | // CHECK-LABEL: define noundef <2 x double> @test_vld1q_dup_f64(double* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast double* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to double*
// CHECK: [[TMP2:%.*]] = load double, double* [[TMP1]]
Context not available.
return vld1q_dup_f64(a);
}
// CHECK-LABEL: define <16 x i8> @test_vld1q_dup_p8(i8* %a) #0 { | // CHECK-LABEL: define noundef <16 x i8> @test_vld1q_dup_p8(i8* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[TMP1:%.*]] = insertelement <16 x i8> undef, i8 [[TMP0]], i32 0
// CHECK: [[LANE:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> [[TMP1]], <16 x i32> zeroinitializer
Context not available.
return vld1q_dup_p8(a);
}
// CHECK-LABEL: define <8 x i16> @test_vld1q_dup_p16(i16* %a) #0 { | // CHECK-LABEL: define noundef <8 x i16> @test_vld1q_dup_p16(i16* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]]
Context not available.
return vld1q_dup_p16(a);
}
// CHECK-LABEL: define <2 x i64> @test_vld1q_dup_p64(i64* %a) #0 { | // CHECK-LABEL: define noundef <2 x i64> @test_vld1q_dup_p64(i64* noundef %a) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64*
// CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]]
Context not available.
return vld1q_dup_p64(a);
}
// CHECK-LABEL: define <8 x i8> @test_vld1_dup_u8(i8* %a) #1 { | // CHECK-LABEL: define noundef <8 x i8> @test_vld1_dup_u8(i8* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[TMP1:%.*]] = insertelement <8 x i8> undef, i8 [[TMP0]], i32 0
// CHECK: [[LANE:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP1]], <8 x i32> zeroinitializer
Context not available.
return vld1_dup_u8(a);
}
// CHECK-LABEL: define <4 x i16> @test_vld1_dup_u16(i16* %a) #1 { | // CHECK-LABEL: define noundef <4 x i16> @test_vld1_dup_u16(i16* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]]
Context not available.
return vld1_dup_u16(a);
}
// CHECK-LABEL: define <2 x i32> @test_vld1_dup_u32(i32* %a) #1 { | // CHECK-LABEL: define noundef <2 x i32> @test_vld1_dup_u32(i32* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32*
// CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]]
Context not available.
return vld1_dup_u32(a);
}
// CHECK-LABEL: define <1 x i64> @test_vld1_dup_u64(i64* %a) #1 { | // CHECK-LABEL: define noundef <1 x i64> @test_vld1_dup_u64(i64* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64*
// CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]]
Context not available.
return vld1_dup_u64(a);
}
// CHECK-LABEL: define <8 x i8> @test_vld1_dup_s8(i8* %a) #1 { | // CHECK-LABEL: define noundef <8 x i8> @test_vld1_dup_s8(i8* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[TMP1:%.*]] = insertelement <8 x i8> undef, i8 [[TMP0]], i32 0
// CHECK: [[LANE:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP1]], <8 x i32> zeroinitializer
Context not available.
return vld1_dup_s8(a);
}
// CHECK-LABEL: define <4 x i16> @test_vld1_dup_s16(i16* %a) #1 { | // CHECK-LABEL: define noundef <4 x i16> @test_vld1_dup_s16(i16* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]]
Context not available.
return vld1_dup_s16(a);
}
// CHECK-LABEL: define <2 x i32> @test_vld1_dup_s32(i32* %a) #1 { | // CHECK-LABEL: define noundef <2 x i32> @test_vld1_dup_s32(i32* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i32*
// CHECK: [[TMP2:%.*]] = load i32, i32* [[TMP1]]
Context not available.
return vld1_dup_s32(a);
}
// CHECK-LABEL: define <1 x i64> @test_vld1_dup_s64(i64* %a) #1 { | // CHECK-LABEL: define noundef <1 x i64> @test_vld1_dup_s64(i64* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64*
// CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]]
Context not available.
return vld1_dup_s64(a);
}
// CHECK-LABEL: define <4 x half> @test_vld1_dup_f16(half* %a) #1 { | // CHECK-LABEL: define noundef <4 x half> @test_vld1_dup_f16(half* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to half*
// CHECK: [[TMP2:%.*]] = load half, half* [[TMP1]]
Context not available.
return vld1_dup_f16(a);
}
// CHECK-LABEL: define <2 x float> @test_vld1_dup_f32(float* %a) #1 { | // CHECK-LABEL: define noundef <2 x float> @test_vld1_dup_f32(float* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast float* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to float*
// CHECK: [[TMP2:%.*]] = load float, float* [[TMP1]]
Context not available.
return vld1_dup_f32(a);
}
// CHECK-LABEL: define <1 x double> @test_vld1_dup_f64(double* %a) #1 { | // CHECK-LABEL: define noundef <1 x double> @test_vld1_dup_f64(double* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast double* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to double*
// CHECK: [[TMP2:%.*]] = load double, double* [[TMP1]]
Context not available.
return vld1_dup_f64(a);
}
// CHECK-LABEL: define <8 x i8> @test_vld1_dup_p8(i8* %a) #1 { | // CHECK-LABEL: define noundef <8 x i8> @test_vld1_dup_p8(i8* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[TMP1:%.*]] = insertelement <8 x i8> undef, i8 [[TMP0]], i32 0
// CHECK: [[LANE:%.*]] = shufflevector <8 x i8> [[TMP1]], <8 x i8> [[TMP1]], <8 x i32> zeroinitializer
Context not available.
return vld1_dup_p8(a);
}
// CHECK-LABEL: define <4 x i16> @test_vld1_dup_p16(i16* %a) #1 { | // CHECK-LABEL: define noundef <4 x i16> @test_vld1_dup_p16(i16* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i16*
// CHECK: [[TMP2:%.*]] = load i16, i16* [[TMP1]]
Context not available.
return vld1_dup_p16(a);
}
// CHECK-LABEL: define <1 x i64> @test_vld1_dup_p64(i64* %a) #1 { | // CHECK-LABEL: define noundef <1 x i64> @test_vld1_dup_p64(i64* noundef %a) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to i64*
// CHECK: [[TMP2:%.*]] = load i64, i64* [[TMP1]]
Context not available.
return vld1_dup_p64(a);
}
// CHECK-LABEL: define %struct.uint64x2x2_t @test_vld2q_dup_u64(i64* %a) #2 { | // CHECK-LABEL: define %struct.uint64x2x2_t @test_vld2q_dup_u64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x2_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x2x2_t* [[__RET]] to i8*
Context not available.
return vld2q_dup_u64(a);
}
// CHECK-LABEL: define %struct.int64x2x2_t @test_vld2q_dup_s64(i64* %a) #2 { | // CHECK-LABEL: define %struct.int64x2x2_t @test_vld2q_dup_s64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x2_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.int64x2x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int64x2x2_t* [[__RET]] to i8*
Context not available.
return vld2q_dup_s64(a);
}
// CHECK-LABEL: define %struct.float64x2x2_t @test_vld2q_dup_f64(double* %a) #2 { | // CHECK-LABEL: define %struct.float64x2x2_t @test_vld2q_dup_f64(double* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x2_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.float64x2x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float64x2x2_t* [[__RET]] to i8*
Context not available.
return vld2q_dup_f64(a);
}
// CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_dup_p64(i64* %a) #2 { | // CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_dup_p64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x2_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x2_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x2_t* [[__RET]] to i8*
Context not available.
return vld2q_dup_p64(a);
}
// CHECK-LABEL: define %struct.float64x1x2_t @test_vld2_dup_f64(double* %a) #2 { | // CHECK-LABEL: define %struct.float64x1x2_t @test_vld2_dup_f64(double* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x2_t, align 8
// CHECK: [[__RET:%.*]] = alloca %struct.float64x1x2_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float64x1x2_t* [[__RET]] to i8*
Context not available.
return vld2_dup_f64(a);
}
// CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_dup_p64(i64* %a) #2 { | // CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_dup_p64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8
// CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x2_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x2_t* [[__RET]] to i8*
Context not available.
return vld2_dup_p64(a);
}
// CHECK-LABEL: define %struct.uint64x2x3_t @test_vld3q_dup_u64(i64* %a) #2 { | // CHECK-LABEL: define %struct.uint64x2x3_t @test_vld3q_dup_u64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x3_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x2x3_t* [[__RET]] to i8*
Context not available.
// [{{x[0-9]+|sp}}]
}
// CHECK-LABEL: define %struct.int64x2x3_t @test_vld3q_dup_s64(i64* %a) #2 { | // CHECK-LABEL: define %struct.int64x2x3_t @test_vld3q_dup_s64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x3_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.int64x2x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int64x2x3_t* [[__RET]] to i8*
Context not available.
// [{{x[0-9]+|sp}}]
}
// CHECK-LABEL: define %struct.float64x2x3_t @test_vld3q_dup_f64(double* %a) #2 { | // CHECK-LABEL: define %struct.float64x2x3_t @test_vld3q_dup_f64(double* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x3_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.float64x2x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float64x2x3_t* [[__RET]] to i8*
Context not available.
// [{{x[0-9]+|sp}}]
}
// CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_dup_p64(i64* %a) #2 { | // CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_dup_p64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x3_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x3_t* [[__RET]] to i8*
Context not available.
// [{{x[0-9]+|sp}}]
}
// CHECK-LABEL: define %struct.float64x1x3_t @test_vld3_dup_f64(double* %a) #2 { | // CHECK-LABEL: define %struct.float64x1x3_t @test_vld3_dup_f64(double* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x3_t, align 8
// CHECK: [[__RET:%.*]] = alloca %struct.float64x1x3_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float64x1x3_t* [[__RET]] to i8*
Context not available.
// [{{x[0-9]+|sp}}]
}
// CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_dup_p64(i64* %a) #2 { | // CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_dup_p64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 8
// CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x3_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x3_t* [[__RET]] to i8*
Context not available.
// [{{x[0-9]+|sp}}]
}
// CHECK-LABEL: define %struct.uint64x2x4_t @test_vld4q_dup_u64(i64* %a) #2 { | // CHECK-LABEL: define %struct.uint64x2x4_t @test_vld4q_dup_u64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x4_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.uint64x2x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.uint64x2x4_t* [[__RET]] to i8*
Context not available.
return vld4q_dup_u64(a);
}
// CHECK-LABEL: define %struct.int64x2x4_t @test_vld4q_dup_s64(i64* %a) #2 { | // CHECK-LABEL: define %struct.int64x2x4_t @test_vld4q_dup_s64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x4_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.int64x2x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.int64x2x4_t* [[__RET]] to i8*
Context not available.
return vld4q_dup_s64(a);
}
// CHECK-LABEL: define %struct.float64x2x4_t @test_vld4q_dup_f64(double* %a) #2 { | // CHECK-LABEL: define %struct.float64x2x4_t @test_vld4q_dup_f64(double* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x4_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.float64x2x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.float64x2x4_t* [[__RET]] to i8*
Context not available.
return vld4q_dup_f64(a);
}
// CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_dup_p64(i64* %a) #2 { | // CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_dup_p64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16
// CHECK: [[__RET:%.*]] = alloca %struct.poly64x2x4_t, align 16
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x2x4_t* [[__RET]] to i8*
Context not available.
return vld4q_dup_p64(a);
}
// CHECK-LABEL: define %struct.float64x1x4_t @test_vld4_dup_f64(double* %a) #2 { | // CHECK-LABEL: define %struct.float64x1x4_t @test_vld4_dup_f64(double* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x4_t, align 8
// CHECK: [[__RET:%.*]] = alloca %struct.float64x1x4_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.float64x1x4_t* [[__RET]] to i8*
Context not available.
return vld4_dup_f64(a);
}
// CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_dup_p64(i64* %a) #2 { | // CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_dup_p64(i64* noundef %a) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8
// CHECK: [[__RET:%.*]] = alloca %struct.poly64x1x4_t, align 8
// CHECK: [[TMP0:%.*]] = bitcast %struct.poly64x1x4_t* [[__RET]] to i8*
Context not available.
return vld4_dup_p64(a);
}
// CHECK-LABEL: define <16 x i8> @test_vld1q_lane_u8(i8* %a, <16 x i8> %b) #0 { | // CHECK-LABEL: define noundef <16 x i8> @test_vld1q_lane_u8(i8* noundef %a, <16 x i8> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[VLD1_LANE:%.*]] = insertelement <16 x i8> %b, i8 [[TMP0]], i32 15
// CHECK: ret <16 x i8> [[VLD1_LANE]]
Context not available.
return vld1q_lane_u8(a, b, 15);
}
// CHECK-LABEL: define <8 x i16> @test_vld1q_lane_u16(i16* %a, <8 x i16> %b) #0 { | // CHECK-LABEL: define noundef <8 x i16> @test_vld1q_lane_u16(i16* noundef %a, <8 x i16> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
Context not available.
return vld1q_lane_u16(a, b, 7);
}
// CHECK-LABEL: define <4 x i32> @test_vld1q_lane_u32(i32* %a, <4 x i32> %b) #0 { | // CHECK-LABEL: define noundef <4 x i32> @test_vld1q_lane_u32(i32* noundef %a, <4 x i32> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
Context not available.
return vld1q_lane_u32(a, b, 3);
}
// CHECK-LABEL: define <2 x i64> @test_vld1q_lane_u64(i64* %a, <2 x i64> %b) #0 { | // CHECK-LABEL: define noundef <2 x i64> @test_vld1q_lane_u64(i64* noundef %a, <2 x i64> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
Context not available.
return vld1q_lane_u64(a, b, 1);
}
// CHECK-LABEL: define <16 x i8> @test_vld1q_lane_s8(i8* %a, <16 x i8> %b) #0 { | // CHECK-LABEL: define noundef <16 x i8> @test_vld1q_lane_s8(i8* noundef %a, <16 x i8> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[VLD1_LANE:%.*]] = insertelement <16 x i8> %b, i8 [[TMP0]], i32 15
// CHECK: ret <16 x i8> [[VLD1_LANE]]
Context not available.
return vld1q_lane_s8(a, b, 15);
}
// CHECK-LABEL: define <8 x i16> @test_vld1q_lane_s16(i16* %a, <8 x i16> %b) #0 { | // CHECK-LABEL: define noundef <8 x i16> @test_vld1q_lane_s16(i16* noundef %a, <8 x i16> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
Context not available.
return vld1q_lane_s16(a, b, 7);
}
// CHECK-LABEL: define <4 x i32> @test_vld1q_lane_s32(i32* %a, <4 x i32> %b) #0 { | // CHECK-LABEL: define noundef <4 x i32> @test_vld1q_lane_s32(i32* noundef %a, <4 x i32> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
Context not available.
return vld1q_lane_s32(a, b, 3);
}
// CHECK-LABEL: define <2 x i64> @test_vld1q_lane_s64(i64* %a, <2 x i64> %b) #0 { | // CHECK-LABEL: define noundef <2 x i64> @test_vld1q_lane_s64(i64* noundef %a, <2 x i64> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
Context not available.
return vld1q_lane_s64(a, b, 1);
}
// CHECK-LABEL: define <8 x half> @test_vld1q_lane_f16(half* %a, <8 x half> %b) #0 { | // CHECK-LABEL: define noundef <8 x half> @test_vld1q_lane_f16(half* noundef %a, <8 x half> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
Context not available.
return vld1q_lane_f16(a, b, 7);
}
// CHECK-LABEL: define <4 x float> @test_vld1q_lane_f32(float* %a, <4 x float> %b) #0 { | // CHECK-LABEL: define noundef <4 x float> @test_vld1q_lane_f32(float* noundef %a, <4 x float> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast float* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
Context not available.
return vld1q_lane_f32(a, b, 3);
}
// CHECK-LABEL: define <2 x double> @test_vld1q_lane_f64(double* %a, <2 x double> %b) #0 { | // CHECK-LABEL: define noundef <2 x double> @test_vld1q_lane_f64(double* noundef %a, <2 x double> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast double* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
Context not available.
return vld1q_lane_f64(a, b, 1);
}
// CHECK-LABEL: define <16 x i8> @test_vld1q_lane_p8(i8* %a, <16 x i8> %b) #0 { | // CHECK-LABEL: define noundef <16 x i8> @test_vld1q_lane_p8(i8* noundef %a, <16 x i8> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[VLD1_LANE:%.*]] = insertelement <16 x i8> %b, i8 [[TMP0]], i32 15
// CHECK: ret <16 x i8> [[VLD1_LANE]]
Context not available.
return vld1q_lane_p8(a, b, 15);
}
// CHECK-LABEL: define <8 x i16> @test_vld1q_lane_p16(i16* %a, <8 x i16> %b) #0 { | // CHECK-LABEL: define noundef <8 x i16> @test_vld1q_lane_p16(i16* noundef %a, <8 x i16> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
Context not available.
return vld1q_lane_p16(a, b, 7);
}
// CHECK-LABEL: define <2 x i64> @test_vld1q_lane_p64(i64* %a, <2 x i64> %b) #0 { | // CHECK-LABEL: define noundef <2 x i64> @test_vld1q_lane_p64(i64* noundef %a, <2 x i64> noundef %b) #0 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
Context not available.
return vld1q_lane_p64(a, b, 1);
}
// CHECK-LABEL: define <8 x i8> @test_vld1_lane_u8(i8* %a, <8 x i8> %b) #1 { | // CHECK-LABEL: define noundef <8 x i8> @test_vld1_lane_u8(i8* noundef %a, <8 x i8> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i8> %b, i8 [[TMP0]], i32 7
// CHECK: ret <8 x i8> [[VLD1_LANE]]
Context not available.
return vld1_lane_u8(a, b, 7);
}
// CHECK-LABEL: define <4 x i16> @test_vld1_lane_u16(i16* %a, <4 x i16> %b) #1 { | // CHECK-LABEL: define noundef <4 x i16> @test_vld1_lane_u16(i16* noundef %a, <4 x i16> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
Context not available.
return vld1_lane_u16(a, b, 3);
}
// CHECK-LABEL: define <2 x i32> @test_vld1_lane_u32(i32* %a, <2 x i32> %b) #1 { | // CHECK-LABEL: define noundef <2 x i32> @test_vld1_lane_u32(i32* noundef %a, <2 x i32> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
Context not available.
return vld1_lane_u32(a, b, 1);
}
// CHECK-LABEL: define <1 x i64> @test_vld1_lane_u64(i64* %a, <1 x i64> %b) #1 { | // CHECK-LABEL: define noundef <1 x i64> @test_vld1_lane_u64(i64* noundef %a, <1 x i64> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64>
Context not available.
return vld1_lane_u64(a, b, 0);
}
// CHECK-LABEL: define <8 x i8> @test_vld1_lane_s8(i8* %a, <8 x i8> %b) #1 { | // CHECK-LABEL: define noundef <8 x i8> @test_vld1_lane_s8(i8* noundef %a, <8 x i8> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i8> %b, i8 [[TMP0]], i32 7
// CHECK: ret <8 x i8> [[VLD1_LANE]]
Context not available.
return vld1_lane_s8(a, b, 7);
}
// CHECK-LABEL: define <4 x i16> @test_vld1_lane_s16(i16* %a, <4 x i16> %b) #1 { | // CHECK-LABEL: define noundef <4 x i16> @test_vld1_lane_s16(i16* noundef %a, <4 x i16> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
Context not available.
return vld1_lane_s16(a, b, 3);
}
// CHECK-LABEL: define <2 x i32> @test_vld1_lane_s32(i32* %a, <2 x i32> %b) #1 { | // CHECK-LABEL: define noundef <2 x i32> @test_vld1_lane_s32(i32* noundef %a, <2 x i32> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
Context not available.
return vld1_lane_s32(a, b, 1);
}
// CHECK-LABEL: define <1 x i64> @test_vld1_lane_s64(i64* %a, <1 x i64> %b) #1 { | // CHECK-LABEL: define noundef <1 x i64> @test_vld1_lane_s64(i64* noundef %a, <1 x i64> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64>
Context not available.
return vld1_lane_s64(a, b, 0);
}
// CHECK-LABEL: define <4 x half> @test_vld1_lane_f16(half* %a, <4 x half> %b) #1 { | // CHECK-LABEL: define noundef <4 x half> @test_vld1_lane_f16(half* noundef %a, <4 x half> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
Context not available.
return vld1_lane_f16(a, b, 3);
}
// CHECK-LABEL: define <2 x float> @test_vld1_lane_f32(float* %a, <2 x float> %b) #1 { | // CHECK-LABEL: define noundef <2 x float> @test_vld1_lane_f32(float* noundef %a, <2 x float> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast float* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
Context not available.
return vld1_lane_f32(a, b, 1);
}
// CHECK-LABEL: define <1 x double> @test_vld1_lane_f64(double* %a, <1 x double> %b) #1 { | // CHECK-LABEL: define noundef <1 x double> @test_vld1_lane_f64(double* noundef %a, <1 x double> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast double* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
Context not available.
return vld1_lane_f64(a, b, 0);
}
// CHECK-LABEL: define <8 x i8> @test_vld1_lane_p8(i8* %a, <8 x i8> %b) #1 { | // CHECK-LABEL: define noundef <8 x i8> @test_vld1_lane_p8(i8* noundef %a, <8 x i8> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = load i8, i8* %a
// CHECK: [[VLD1_LANE:%.*]] = insertelement <8 x i8> %b, i8 [[TMP0]], i32 7
// CHECK: ret <8 x i8> [[VLD1_LANE]]
Context not available.
return vld1_lane_p8(a, b, 7);
}
// CHECK-LABEL: define <4 x i16> @test_vld1_lane_p16(i16* %a, <4 x i16> %b) #1 { | // CHECK-LABEL: define noundef <4 x i16> @test_vld1_lane_p16(i16* noundef %a, <4 x i16> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
Context not available.
return vld1_lane_p16(a, b, 3);
}
// CHECK-LABEL: define <1 x i64> @test_vld1_lane_p64(i64* %a, <1 x i64> %b) #1 { | // CHECK-LABEL: define noundef <1 x i64> @test_vld1_lane_p64(i64* noundef %a, <1 x i64> noundef %b) #1 {
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8*
// CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64>
Context not available.
return vld1_lane_p64(a, b, 0);
}
// CHECK-LABEL: define %struct.int8x16x2_t @test_vld2q_lane_s8(i8* %ptr, [2 x <16 x i8>] %src.coerce) #2 { | // CHECK-LABEL: define %struct.int8x16x2_t @test_vld2q_lane_s8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x2_t, align 16
// CHECK: [[SRC:%.*]] = alloca %struct.int8x16x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x2_t, align 16
Context not available.
return vld2q_lane_s8(ptr, src, 15);
}
// CHECK-LABEL: define %struct.uint8x16x2_t @test_vld2q_lane_u8(i8* %ptr, [2 x <16 x i8>] %src.coerce) #2 { | // CHECK-LABEL: define %struct.uint8x16x2_t @test_vld2q_lane_u8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x2_t, align 16
// CHECK: [[SRC:%.*]] = alloca %struct.uint8x16x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x2_t, align 16
Context not available.
return vld2q_lane_u8(ptr, src, 15);
}
// CHECK-LABEL: define %struct.poly8x16x2_t @test_vld2q_lane_p8(i8* %ptr, [2 x <16 x i8>] %src.coerce) #2 { | // CHECK-LABEL: define %struct.poly8x16x2_t @test_vld2q_lane_p8(i8* noundef %ptr, [2 x <16 x i8>] %src.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x2_t, align 16
// CHECK: [[SRC:%.*]] = alloca %struct.poly8x16x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x2_t, align 16
Context not available.
return vld2q_lane_p8(ptr, src, 15);
}
// CHECK-LABEL: define %struct.int8x16x3_t @test_vld3q_lane_s8(i8* %ptr, [3 x <16 x i8>] %src.coerce) #2 { | // CHECK-LABEL: define %struct.int8x16x3_t @test_vld3q_lane_s8(i8* noundef %ptr, [3 x <16 x i8>] %src.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x3_t, align 16
// CHECK: [[SRC:%.*]] = alloca %struct.int8x16x3_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x3_t, align 16
Context not available.
return vld3q_lane_s8(ptr, src, 15);
}
// CHECK-LABEL: define %struct.uint8x16x3_t @test_vld3q_lane_u8(i8* %ptr, [3 x <16 x i8>] %src.coerce) #2 { | // CHECK-LABEL: define %struct.uint8x16x3_t @test_vld3q_lane_u8(i8* noundef %ptr, [3 x <16 x i8>] %src.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x3_t, align 16
// CHECK: [[SRC:%.*]] = alloca %struct.uint8x16x3_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x3_t, align 16
Context not available.
return vld3q_lane_u8(ptr, src, 15);
}
// CHECK-LABEL: define %struct.uint16x8x2_t @test_vld2q_lane_u16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint16x8x2_t @test_vld2q_lane_u16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16
Context not available.
return vld2q_lane_u16(a, b, 7);
}
// CHECK-LABEL: define %struct.uint32x4x2_t @test_vld2q_lane_u32(i32* %a, [2 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint32x4x2_t @test_vld2q_lane_u32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16
Context not available.
return vld2q_lane_u32(a, b, 3);
}
// CHECK-LABEL: define %struct.uint64x2x2_t @test_vld2q_lane_u64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint64x2x2_t @test_vld2q_lane_u64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.uint64x2x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x2_t, align 16
Context not available.
return vld2q_lane_u64(a, b, 1);
}
// CHECK-LABEL: define %struct.int16x8x2_t @test_vld2q_lane_s16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int16x8x2_t @test_vld2q_lane_s16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16
Context not available.
return vld2q_lane_s16(a, b, 7);
}
// CHECK-LABEL: define %struct.int32x4x2_t @test_vld2q_lane_s32(i32* %a, [2 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int32x4x2_t @test_vld2q_lane_s32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16
Context not available.
return vld2q_lane_s32(a, b, 3);
}
// CHECK-LABEL: define %struct.int64x2x2_t @test_vld2q_lane_s64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int64x2x2_t @test_vld2q_lane_s64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.int64x2x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.int64x2x2_t, align 16
Context not available.
return vld2q_lane_s64(a, b, 1);
}
// CHECK-LABEL: define %struct.float16x8x2_t @test_vld2q_lane_f16(half* %a, [2 x <8 x half>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float16x8x2_t @test_vld2q_lane_f16(half* noundef %a, [2 x <8 x half>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16
Context not available.
return vld2q_lane_f16(a, b, 7);
}
// CHECK-LABEL: define %struct.float32x4x2_t @test_vld2q_lane_f32(float* %a, [2 x <4 x float>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float32x4x2_t @test_vld2q_lane_f32(float* noundef %a, [2 x <4 x float>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16
Context not available.
return vld2q_lane_f32(a, b, 3);
}
// CHECK-LABEL: define %struct.float64x2x2_t @test_vld2q_lane_f64(double* %a, [2 x <2 x double>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float64x2x2_t @test_vld2q_lane_f64(double* noundef %a, [2 x <2 x double>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.float64x2x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.float64x2x2_t, align 16
Context not available.
return vld2q_lane_f64(a, b, 1);
}
// CHECK-LABEL: define %struct.poly16x8x2_t @test_vld2q_lane_p16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly16x8x2_t @test_vld2q_lane_p16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 {
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x2_t, align 16
// CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16
Context not available.
return vld2q_lane_p16(a, b, 7);
}
// CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_lane_p64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly64x2x2_t @test_vld2q_lane_p64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x2_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x2x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly64x2x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 | ||||
Context not available. | |||||
return vld2q_lane_p64(a, b, 1); | return vld2q_lane_p64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint8x8x2_t @test_vld2_lane_u8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint8x8x2_t @test_vld2_lane_u8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_u8(a, b, 7); | return vld2_lane_u8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint16x4x2_t @test_vld2_lane_u16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint16x4x2_t @test_vld2_lane_u16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x4x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint16x4x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_u16(a, b, 3); | return vld2_lane_u16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint32x2x2_t @test_vld2_lane_u32(i32* %a, [2 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint32x2x2_t @test_vld2_lane_u32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x2x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint32x2x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_u32(a, b, 1); | return vld2_lane_u32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint64x1x2_t @test_vld2_lane_u64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint64x1x2_t @test_vld2_lane_u64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x1x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint64x1x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_u64(a, b, 0); | return vld2_lane_u64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int8x8x2_t @test_vld2_lane_s8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int8x8x2_t @test_vld2_lane_s8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x8x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_s8(a, b, 7); | return vld2_lane_s8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int16x4x2_t @test_vld2_lane_s16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int16x4x2_t @test_vld2_lane_s16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x4x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int16x4x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x4x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_s16(a, b, 3); | return vld2_lane_s16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int32x2x2_t @test_vld2_lane_s32(i32* %a, [2 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int32x2x2_t @test_vld2_lane_s32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x2x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int32x2x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x2x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_s32(a, b, 1); | return vld2_lane_s32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int64x1x2_t @test_vld2_lane_s64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int64x1x2_t @test_vld2_lane_s64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x1x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int64x1x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x1x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_s64(a, b, 0); | return vld2_lane_s64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float16x4x2_t @test_vld2_lane_f16(half* %a, [2 x <4 x half>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float16x4x2_t @test_vld2_lane_f16(half* noundef %a, [2 x <4 x half>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x4x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float16x4x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x4x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_f16(a, b, 3); | return vld2_lane_f16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float32x2x2_t @test_vld2_lane_f32(float* %a, [2 x <2 x float>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float32x2x2_t @test_vld2_lane_f32(float* noundef %a, [2 x <2 x float>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x2x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float32x2x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x2x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_f32(a, b, 1); | return vld2_lane_f32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float64x1x2_t @test_vld2_lane_f64(double* %a, [2 x <1 x double>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float64x1x2_t @test_vld2_lane_f64(double* noundef %a, [2 x <1 x double>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x1x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_f64(a, b, 0); | return vld2_lane_f64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly8x8x2_t @test_vld2_lane_p8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly8x8x2_t @test_vld2_lane_p8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_p8(a, b, 7); | return vld2_lane_p8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly16x4x2_t @test_vld2_lane_p16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly16x4x2_t @test_vld2_lane_p16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x4x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly16x4x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_p16(a, b, 3); | return vld2_lane_p16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_lane_p64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly64x1x2_t @test_vld2_lane_p64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x2_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x1x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly64x1x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 | ||||
Context not available. | |||||
return vld2_lane_p64(a, b, 0); | return vld2_lane_p64(a, b, 0); | ||||
} | } | ||||
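// NOTE: illustrative sketch only, not part of the checked-in test. The vldN_lane
// intrinsics load N consecutive elements from memory into a single lane of each
// vector in an N-vector struct and return the updated struct; all other lanes are
// passed through unchanged. A minimal usage example (hypothetical helper name),
// assuming <arm_neon.h>:
//
//   uint16x4x2_t refresh_lane3(const uint16_t *p, uint16x4x2_t v) {
//     // p[0] -> lane 3 of v.val[0], p[1] -> lane 3 of v.val[1]
//     return vld2_lane_u16(p, v, 3);
//   }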
// CHECK-LABEL: define %struct.uint16x8x3_t @test_vld3q_lane_u16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint16x8x3_t @test_vld3q_lane_u16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_u16(a, b, 7); | return vld3q_lane_u16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint32x4x3_t @test_vld3q_lane_u32(i32* %a, [3 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint32x4x3_t @test_vld3q_lane_u32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_u32(a, b, 3); | return vld3q_lane_u32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint64x2x3_t @test_vld3q_lane_u64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint64x2x3_t @test_vld3q_lane_u64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x2x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint64x2x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_u64(a, b, 1); | return vld3q_lane_u64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int16x8x3_t @test_vld3q_lane_s16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int16x8x3_t @test_vld3q_lane_s16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_s16(a, b, 7); | return vld3q_lane_s16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int32x4x3_t @test_vld3q_lane_s32(i32* %a, [3 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int32x4x3_t @test_vld3q_lane_s32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_s32(a, b, 3); | return vld3q_lane_s32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int64x2x3_t @test_vld3q_lane_s64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int64x2x3_t @test_vld3q_lane_s64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x2x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int64x2x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x2x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_s64(a, b, 1); | return vld3q_lane_s64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float16x8x3_t @test_vld3q_lane_f16(half* %a, [3 x <8 x half>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float16x8x3_t @test_vld3q_lane_f16(half* noundef %a, [3 x <8 x half>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_f16(a, b, 7); | return vld3q_lane_f16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float32x4x3_t @test_vld3q_lane_f32(float* %a, [3 x <4 x float>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float32x4x3_t @test_vld3q_lane_f32(float* noundef %a, [3 x <4 x float>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_f32(a, b, 3); | return vld3q_lane_f32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float64x2x3_t @test_vld3q_lane_f64(double* %a, [3 x <2 x double>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float64x2x3_t @test_vld3q_lane_f64(double* noundef %a, [3 x <2 x double>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x2x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_f64(a, b, 1); | return vld3q_lane_f64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly8x16x3_t @test_vld3q_lane_p8(i8* %a, [3 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly8x16x3_t @test_vld3q_lane_p8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_p8(a, b, 15); | return vld3q_lane_p8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly16x8x3_t @test_vld3q_lane_p16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly16x8x3_t @test_vld3q_lane_p16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_p16(a, b, 7); | return vld3q_lane_p16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_lane_p64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly64x2x3_t @test_vld3q_lane_p64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x3_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x2x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly64x2x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 | ||||
Context not available. | |||||
return vld3q_lane_p64(a, b, 1); | return vld3q_lane_p64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint8x8x3_t @test_vld3_lane_u8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint8x8x3_t @test_vld3_lane_u8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_u8(a, b, 7); | return vld3_lane_u8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint16x4x3_t @test_vld3_lane_u16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint16x4x3_t @test_vld3_lane_u16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x4x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint16x4x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_u16(a, b, 3); | return vld3_lane_u16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint32x2x3_t @test_vld3_lane_u32(i32* %a, [3 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint32x2x3_t @test_vld3_lane_u32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x2x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint32x2x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_u32(a, b, 1); | return vld3_lane_u32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint64x1x3_t @test_vld3_lane_u64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint64x1x3_t @test_vld3_lane_u64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x1x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint64x1x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_u64(a, b, 0); | return vld3_lane_u64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int8x8x3_t @test_vld3_lane_s8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int8x8x3_t @test_vld3_lane_s8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x8x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int8x8x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x8x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_s8(a, b, 7); | return vld3_lane_s8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int16x4x3_t @test_vld3_lane_s16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int16x4x3_t @test_vld3_lane_s16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x4x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int16x4x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x4x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_s16(a, b, 3); | return vld3_lane_s16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int32x2x3_t @test_vld3_lane_s32(i32* %a, [3 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int32x2x3_t @test_vld3_lane_s32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x2x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int32x2x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x2x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_s32(a, b, 1); | return vld3_lane_s32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int64x1x3_t @test_vld3_lane_s64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int64x1x3_t @test_vld3_lane_s64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x1x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int64x1x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x1x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_s64(a, b, 0); | return vld3_lane_s64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float16x4x3_t @test_vld3_lane_f16(half* %a, [3 x <4 x half>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float16x4x3_t @test_vld3_lane_f16(half* noundef %a, [3 x <4 x half>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x4x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float16x4x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x4x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_f16(a, b, 3); | return vld3_lane_f16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float32x2x3_t @test_vld3_lane_f32(float* %a, [3 x <2 x float>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float32x2x3_t @test_vld3_lane_f32(float* noundef %a, [3 x <2 x float>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x2x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float32x2x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x2x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_f32(a, b, 1); | return vld3_lane_f32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float64x1x3_t @test_vld3_lane_f64(double* %a, [3 x <1 x double>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float64x1x3_t @test_vld3_lane_f64(double* noundef %a, [3 x <1 x double>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x1x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_f64(a, b, 0); | return vld3_lane_f64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly8x8x3_t @test_vld3_lane_p8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly8x8x3_t @test_vld3_lane_p8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_p8(a, b, 7); | return vld3_lane_p8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly16x4x3_t @test_vld3_lane_p16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly16x4x3_t @test_vld3_lane_p16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x4x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly16x4x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_p16(a, b, 3); | return vld3_lane_p16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_lane_p64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly64x1x3_t @test_vld3_lane_p64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x3_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x1x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly64x1x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 | ||||
Context not available. | |||||
return vld3_lane_p64(a, b, 0); | return vld3_lane_p64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint8x16x4_t @test_vld4q_lane_u8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint8x16x4_t @test_vld4q_lane_u8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x16x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_u8(a, b, 15); | return vld4q_lane_u8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint16x8x4_t @test_vld4q_lane_u16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint16x8x4_t @test_vld4q_lane_u16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x8x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_u16(a, b, 7); | return vld4q_lane_u16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint32x4x4_t @test_vld4q_lane_u32(i32* %a, [4 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint32x4x4_t @test_vld4q_lane_u32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x4x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_u32(a, b, 3); | return vld4q_lane_u32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint64x2x4_t @test_vld4q_lane_u64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint64x2x4_t @test_vld4q_lane_u64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x2x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x2x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint64x2x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_u64(a, b, 1); | return vld4q_lane_u64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int8x16x4_t @test_vld4q_lane_s8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int8x16x4_t @test_vld4q_lane_s8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x16x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_s8(a, b, 15); | return vld4q_lane_s8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int16x8x4_t @test_vld4q_lane_s16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int16x8x4_t @test_vld4q_lane_s16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x8x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_s16(a, b, 7); | return vld4q_lane_s16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int32x4x4_t @test_vld4q_lane_s32(i32* %a, [4 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int32x4x4_t @test_vld4q_lane_s32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x4x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_s32(a, b, 3); | return vld4q_lane_s32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int64x2x4_t @test_vld4q_lane_s64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int64x2x4_t @test_vld4q_lane_s64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x2x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x2x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int64x2x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x2x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_s64(a, b, 1); | return vld4q_lane_s64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float16x8x4_t @test_vld4q_lane_f16(half* %a, [4 x <8 x half>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float16x8x4_t @test_vld4q_lane_f16(half* noundef %a, [4 x <8 x half>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x8x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_f16(a, b, 7); | return vld4q_lane_f16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float32x4x4_t @test_vld4q_lane_f32(float* %a, [4 x <4 x float>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float32x4x4_t @test_vld4q_lane_f32(float* noundef %a, [4 x <4 x float>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x4x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_f32(a, b, 3); | return vld4q_lane_f32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float64x2x4_t @test_vld4q_lane_f64(double* %a, [4 x <2 x double>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float64x2x4_t @test_vld4q_lane_f64(double* noundef %a, [4 x <2 x double>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x2x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x2x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_f64(a, b, 1); | return vld4q_lane_f64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly8x16x4_t @test_vld4q_lane_p8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly8x16x4_t @test_vld4q_lane_p8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x16x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_p8(a, b, 15); | return vld4q_lane_p8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly16x8x4_t @test_vld4q_lane_p16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly16x8x4_t @test_vld4q_lane_p16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x8x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_p16(a, b, 7); | return vld4q_lane_p16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_lane_p64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly64x2x4_t @test_vld4q_lane_p64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x2x4_t, align 16 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x2x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly64x2x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 | ||||
Context not available. | |||||
return vld4q_lane_p64(a, b, 1); | return vld4q_lane_p64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint8x8x4_t @test_vld4_lane_u8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint8x8x4_t @test_vld4_lane_u8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint8x8x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_u8(a, b, 7); | return vld4_lane_u8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint16x4x4_t @test_vld4_lane_u16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint16x4x4_t @test_vld4_lane_u16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint16x4x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x4x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint16x4x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_u16(a, b, 3); | return vld4_lane_u16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint32x2x4_t @test_vld4_lane_u32(i32* %a, [4 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint32x2x4_t @test_vld4_lane_u32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint32x2x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x2x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint32x2x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_u32(a, b, 1); | return vld4_lane_u32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.uint64x1x4_t @test_vld4_lane_u64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.uint64x1x4_t @test_vld4_lane_u64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.uint64x1x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x1x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint64x1x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_u64(a, b, 0); | return vld4_lane_u64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int8x8x4_t @test_vld4_lane_s8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int8x8x4_t @test_vld4_lane_s8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int8x8x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x8x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_s8(a, b, 7); | return vld4_lane_s8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int16x4x4_t @test_vld4_lane_s16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int16x4x4_t @test_vld4_lane_s16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int16x4x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x4x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int16x4x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x4x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_s16(a, b, 3); | return vld4_lane_s16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int32x2x4_t @test_vld4_lane_s32(i32* %a, [4 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int32x2x4_t @test_vld4_lane_s32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int32x2x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x2x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int32x2x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x2x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_s32(a, b, 1); | return vld4_lane_s32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.int64x1x4_t @test_vld4_lane_s64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.int64x1x4_t @test_vld4_lane_s64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.int64x1x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x1x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int64x1x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x1x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_s64(a, b, 0); | return vld4_lane_s64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float16x4x4_t @test_vld4_lane_f16(half* %a, [4 x <4 x half>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float16x4x4_t @test_vld4_lane_f16(half* noundef %a, [4 x <4 x half>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float16x4x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x4x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float16x4x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x4x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_f16(a, b, 3); | return vld4_lane_f16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float32x2x4_t @test_vld4_lane_f32(float* %a, [4 x <2 x float>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float32x2x4_t @test_vld4_lane_f32(float* noundef %a, [4 x <2 x float>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float32x2x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x2x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float32x2x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x2x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_f32(a, b, 1); | return vld4_lane_f32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.float64x1x4_t @test_vld4_lane_f64(double* %a, [4 x <1 x double>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.float64x1x4_t @test_vld4_lane_f64(double* noundef %a, [4 x <1 x double>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.float64x1x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x1x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_f64(a, b, 0); | return vld4_lane_f64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly8x8x4_t @test_vld4_lane_p8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly8x8x4_t @test_vld4_lane_p8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly8x8x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_p8(a, b, 7); | return vld4_lane_p8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly16x4x4_t @test_vld4_lane_p16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly16x4x4_t @test_vld4_lane_p16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly16x4x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x4x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly16x4x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_p16(a, b, 3); | return vld4_lane_p16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_lane_p64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define %struct.poly64x1x4_t @test_vld4_lane_p64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 | // CHECK: [[RETVAL:%.*]] = alloca %struct.poly64x1x4_t, align 8 | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x1x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly64x1x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 | ||||
Context not available. | |||||
return vld4_lane_p64(a, b, 0); | return vld4_lane_p64(a, b, 0); | ||||
} | } | ||||
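// NOTE: illustrative sketch only, not part of the checked-in test. vst1q_lane
// stores a single lane of a vector to memory, which is why the checks below
// expect an extractelement of the requested lane followed by a plain scalar
// store. A minimal usage example (hypothetical helper name), assuming <arm_neon.h>:
//
//   void store_top_lane(uint8_t *p, uint8x16_t v) {
//     vst1q_lane_u8(p, v, 15);  // stores lane 15 of v to p[0]
//   }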
// CHECK-LABEL: define void @test_vst1q_lane_u8(i8* %a, <16 x i8> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_u8(i8* noundef %a, <16 x i8> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = extractelement <16 x i8> %b, i32 15 | // CHECK: [[TMP0:%.*]] = extractelement <16 x i8> %b, i32 15 | ||||
// CHECK: store i8 [[TMP0]], i8* %a | // CHECK: store i8 [[TMP0]], i8* %a | ||||
// CHECK: ret void | // CHECK: ret void | ||||
Context not available. | |||||
vst1q_lane_u8(a, b, 15); | vst1q_lane_u8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_u16(i16* %a, <8 x i16> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_u16(i16* noundef %a, <8 x i16> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> | ||||
Context not available. | |||||
vst1q_lane_u16(a, b, 7); | vst1q_lane_u16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_u32(i32* %a, <4 x i32> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_u32(i32* noundef %a, <4 x i32> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> | ||||
Context not available. | |||||
vst1q_lane_u32(a, b, 3); | vst1q_lane_u32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_u64(i64* %a, <2 x i64> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_u64(i64* noundef %a, <2 x i64> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> | ||||
Context not available. | |||||
vst1q_lane_u64(a, b, 1); | vst1q_lane_u64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_s8(i8* %a, <16 x i8> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_s8(i8* noundef %a, <16 x i8> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = extractelement <16 x i8> %b, i32 15 | // CHECK: [[TMP0:%.*]] = extractelement <16 x i8> %b, i32 15 | ||||
// CHECK: store i8 [[TMP0]], i8* %a | // CHECK: store i8 [[TMP0]], i8* %a | ||||
// CHECK: ret void | // CHECK: ret void | ||||
Context not available. | |||||
vst1q_lane_s8(a, b, 15); | vst1q_lane_s8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_s16(i16* %a, <8 x i16> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_s16(i16* noundef %a, <8 x i16> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> | ||||
Context not available. | |||||
vst1q_lane_s16(a, b, 7); | vst1q_lane_s16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_s32(i32* %a, <4 x i32> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_s32(i32* noundef %a, <4 x i32> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32> | ||||
Context not available. | |||||
vst1q_lane_s32(a, b, 3); | vst1q_lane_s32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_s64(i64* %a, <2 x i64> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_s64(i64* noundef %a, <2 x i64> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> | ||||
Context not available. | |||||
vst1q_lane_s64(a, b, 1); | vst1q_lane_s64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_f16(half* %a, <8 x half> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_f16(half* noundef %a, <8 x half> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <8 x half> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half> | ||||
Context not available. | |||||
vst1q_lane_f16(a, b, 7); | vst1q_lane_f16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_f32(float* %a, <4 x float> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_f32(float* noundef %a, <4 x float> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float> | ||||
Context not available. | |||||
vst1q_lane_f32(a, b, 3); | vst1q_lane_f32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_f64(double* %a, <2 x double> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_f64(double* noundef %a, <2 x double> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double> | ||||
Context not available. | |||||
vst1q_lane_f64(a, b, 1); | vst1q_lane_f64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_p8(i8* %a, <16 x i8> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_p8(i8* noundef %a, <16 x i8> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = extractelement <16 x i8> %b, i32 15 | // CHECK: [[TMP0:%.*]] = extractelement <16 x i8> %b, i32 15 | ||||
// CHECK: store i8 [[TMP0]], i8* %a | // CHECK: store i8 [[TMP0]], i8* %a | ||||
// CHECK: ret void | // CHECK: ret void | ||||
Context not available. | |||||
vst1q_lane_p8(a, b, 15); | vst1q_lane_p8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_p16(i16* %a, <8 x i16> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_p16(i16* noundef %a, <8 x i16> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16> | ||||
Context not available. | |||||
vst1q_lane_p16(a, b, 7); | vst1q_lane_p16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1q_lane_p64(i64* %a, <2 x i64> %b) #0 { | // CHECK-LABEL: define void @test_vst1q_lane_p64(i64* noundef %a, <2 x i64> noundef %b) #0 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> | // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64> | ||||
Context not available. | |||||
vst1q_lane_p64(a, b, 1); | vst1q_lane_p64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_u8(i8* %a, <8 x i8> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_u8(i8* noundef %a, <8 x i8> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 | // CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 | ||||
// CHECK: store i8 [[TMP0]], i8* %a | // CHECK: store i8 [[TMP0]], i8* %a | ||||
// CHECK: ret void | // CHECK: ret void | ||||
Context not available. | |||||
vst1_lane_u8(a, b, 7); | vst1_lane_u8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_u16(i16* %a, <4 x i16> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_u16(i16* noundef %a, <4 x i16> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> | ||||
Context not available. | |||||
vst1_lane_u16(a, b, 3); | vst1_lane_u16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_u32(i32* %a, <2 x i32> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_u32(i32* noundef %a, <2 x i32> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> | ||||
Context not available. | |||||
vst1_lane_u32(a, b, 1); | vst1_lane_u32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_u64(i64* %a, <1 x i64> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_u64(i64* noundef %a, <1 x i64> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> | ||||
Context not available. | |||||
vst1_lane_u64(a, b, 0); | vst1_lane_u64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_s8(i8* %a, <8 x i8> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_s8(i8* noundef %a, <8 x i8> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 | // CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 | ||||
// CHECK: store i8 [[TMP0]], i8* %a | // CHECK: store i8 [[TMP0]], i8* %a | ||||
// CHECK: ret void | // CHECK: ret void | ||||
Context not available. | |||||
vst1_lane_s8(a, b, 7); | vst1_lane_s8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_s16(i16* %a, <4 x i16> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_s16(i16* noundef %a, <4 x i16> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> | ||||
Context not available. | |||||
vst1_lane_s16(a, b, 3); | vst1_lane_s16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_s32(i32* %a, <2 x i32> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_s32(i32* noundef %a, <2 x i32> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i32* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32> | ||||
Context not available. | |||||
vst1_lane_s32(a, b, 1); | vst1_lane_s32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_s64(i64* %a, <1 x i64> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_s64(i64* noundef %a, <1 x i64> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> | ||||
Context not available. | |||||
vst1_lane_s64(a, b, 0); | vst1_lane_s64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_f16(half* %a, <4 x half> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_f16(half* noundef %a, <4 x half> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast half* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <4 x half> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half> | ||||
Context not available. | |||||
vst1_lane_f16(a, b, 3); | vst1_lane_f16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_f32(float* %a, <2 x float> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_f32(float* noundef %a, <2 x float> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast float* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float> | ||||
Context not available. | |||||
vst1_lane_f32(a, b, 1); | vst1_lane_f32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_f64(double* %a, <1 x double> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_f64(double* noundef %a, <1 x double> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast double* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double> | ||||
Context not available. | |||||
vst1_lane_f64(a, b, 0); | vst1_lane_f64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_p8(i8* %a, <8 x i8> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_p8(i8* noundef %a, <8 x i8> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 | // CHECK: [[TMP0:%.*]] = extractelement <8 x i8> %b, i32 7 | ||||
// CHECK: store i8 [[TMP0]], i8* %a | // CHECK: store i8 [[TMP0]], i8* %a | ||||
// CHECK: ret void | // CHECK: ret void | ||||
Context not available. | |||||
vst1_lane_p8(a, b, 7); | vst1_lane_p8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_p16(i16* %a, <4 x i16> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_p16(i16* noundef %a, <4 x i16> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i16* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16> | ||||
Context not available. | |||||
vst1_lane_p16(a, b, 3); | vst1_lane_p16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst1_lane_p64(i64* %a, <1 x i64> %b) #1 { | // CHECK-LABEL: define void @test_vst1_lane_p64(i64* noundef %a, <1 x i64> noundef %b) #1 { | ||||
// CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | // CHECK: [[TMP0:%.*]] = bitcast i64* %a to i8* | ||||
// CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> | // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8> | ||||
// CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> | // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64> | ||||
Context not available. | |||||
vst1_lane_p64(a, b, 0); | vst1_lane_p64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_u8(i8* %a, [2 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_u8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x16x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint8x16x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x2_t, %struct.uint8x16x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_u8(a, b, 15); | vst2q_lane_u8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_u16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_u16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint16x8x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x2_t, %struct.uint16x8x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_u16(a, b, 7); | vst2q_lane_u16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_u32(i32* %a, [2 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_u32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint32x4x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x2_t, %struct.uint32x4x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_u32(a, b, 3); | vst2q_lane_u32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_u64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_u64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x2x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint64x2x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x2_t, %struct.uint64x2x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x2_t, %struct.uint64x2x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_u64(a, b, 1); | vst2q_lane_u64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_s8(i8* %a, [2 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_s8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x16x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int8x16x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x2_t, %struct.int8x16x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_s8(a, b, 15); | vst2q_lane_s8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_s16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_s16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int16x8x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_s16(a, b, 7); | vst2q_lane_s16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_s32(i32* %a, [2 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_s32(i32* noundef %a, [2 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int32x4x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x2_t, %struct.int32x4x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_s32(a, b, 3); | vst2q_lane_s32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_s64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_s64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x2x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int64x2x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x2x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x2_t, %struct.int64x2x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x2_t, %struct.int64x2x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_s64(a, b, 1); | vst2q_lane_s64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_f16(half* %a, [2 x <8 x half>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_f16(half* noundef %a, [2 x <8 x half>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float16x8x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x2_t, %struct.float16x8x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_f16(a, b, 7); | vst2q_lane_f16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_f32(float* %a, [2 x <4 x float>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_f32(float* noundef %a, [2 x <4 x float>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float32x4x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x2_t, %struct.float32x4x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_f32(a, b, 3); | vst2q_lane_f32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_f64(double* %a, [2 x <2 x double>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_f64(double* noundef %a, [2 x <2 x double>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x2x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float64x2x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x2x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x2_t, %struct.float64x2x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x2_t, %struct.float64x2x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_f64(a, b, 1); | vst2q_lane_f64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_p8(i8* %a, [2 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_p8(i8* noundef %a, [2 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x16x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly8x16x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x2_t, %struct.poly8x16x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_p8(a, b, 15); | vst2q_lane_p8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_p16(i16* %a, [2 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_p16(i16* noundef %a, [2 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly16x8x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x2_t, %struct.poly16x8x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_p16(a, b, 7); | vst2q_lane_p16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2q_lane_p64(i64* %a, [2 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2q_lane_p64(i64* noundef %a, [2 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x2x2_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly64x2x2_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x2_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x2_t, %struct.poly64x2x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x2_t, %struct.poly64x2x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2q_lane_p64(a, b, 1); | vst2q_lane_p64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_u8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_u8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint8x8x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x2_t, %struct.uint8x8x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x2_t, %struct.uint8x8x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_u8(a, b, 7); | vst2_lane_u8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_u16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_u16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x4x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint16x4x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x2_t, %struct.uint16x4x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x2_t, %struct.uint16x4x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_u16(a, b, 3); | vst2_lane_u16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_u32(i32* %a, [2 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_u32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x2x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint32x2x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x2_t, %struct.uint32x2x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x2_t, %struct.uint32x2x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_u32(a, b, 1); | vst2_lane_u32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_u64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_u64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x1x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint64x1x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x2_t, %struct.uint64x1x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x2_t, %struct.uint64x1x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_u64(a, b, 0); | vst2_lane_u64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_s8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_s8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int8x8x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x8x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x2_t, %struct.int8x8x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x2_t, %struct.int8x8x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_s8(a, b, 7); | vst2_lane_s8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_s16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_s16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x4x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int16x4x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x4x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x2_t, %struct.int16x4x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x2_t, %struct.int16x4x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_s16(a, b, 3); | vst2_lane_s16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_s32(i32* %a, [2 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_s32(i32* noundef %a, [2 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x2x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int32x2x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x2x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x2_t, %struct.int32x2x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x2_t, %struct.int32x2x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_s32(a, b, 1); | vst2_lane_s32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_s64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_s64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x1x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int64x1x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x1x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x2_t, %struct.int64x1x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x2_t, %struct.int64x1x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_s64(a, b, 0); | vst2_lane_s64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_f16(half* %a, [2 x <4 x half>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_f16(half* noundef %a, [2 x <4 x half>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x4x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float16x4x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x4x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x2_t, %struct.float16x4x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x2_t, %struct.float16x4x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_f16(a, b, 3); | vst2_lane_f16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_f32(float* %a, [2 x <2 x float>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_f32(float* noundef %a, [2 x <2 x float>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x2x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float32x2x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x2x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x2_t, %struct.float32x2x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x2_t, %struct.float32x2x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_f32(a, b, 1); | vst2_lane_f32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_f64(double* %a, [2 x <1 x double>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_f64(double* noundef %a, [2 x <1 x double>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float64x1x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x1x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x2_t, %struct.float64x1x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x2_t, %struct.float64x1x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_f64(a, b, 0); | vst2_lane_f64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_p8(i8* %a, [2 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_p8(i8* noundef %a, [2 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly8x8x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x2_t, %struct.poly8x8x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x2_t, %struct.poly8x8x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_p8(a, b, 7); | vst2_lane_p8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_p16(i16* %a, [2 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_p16(i16* noundef %a, [2 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x4x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly16x4x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x2_t, %struct.poly16x4x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x2_t, %struct.poly16x4x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_p16(a, b, 3); | vst2_lane_p16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst2_lane_p64(i64* %a, [2 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst2_lane_p64(i64* noundef %a, [2 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x1x2_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly64x1x2_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x2_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x2_t, %struct.poly64x1x2_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x2_t, %struct.poly64x1x2_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst2_lane_p64(a, b, 0); | vst2_lane_p64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_u8(i8* %a, [3 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_u8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x16x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint8x16x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x3_t, %struct.uint8x16x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_u8(a, b, 15); | vst3q_lane_u8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_u16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_u16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint16x8x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x3_t, %struct.uint16x8x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_u16(a, b, 7); | vst3q_lane_u16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_u32(i32* %a, [3 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_u32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint32x4x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x3_t, %struct.uint32x4x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_u32(a, b, 3); | vst3q_lane_u32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_u64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_u64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x2x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint64x2x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x3_t, %struct.uint64x2x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x3_t, %struct.uint64x2x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_u64(a, b, 1); | vst3q_lane_u64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_s8(i8* %a, [3 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_s8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x16x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int8x16x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x3_t, %struct.int8x16x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_s8(a, b, 15); | vst3q_lane_s8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_s16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_s16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int16x8x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x3_t, %struct.int16x8x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_s16(a, b, 7); | vst3q_lane_s16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_s32(i32* %a, [3 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_s32(i32* noundef %a, [3 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int32x4x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x3_t, %struct.int32x4x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_s32(a, b, 3); | vst3q_lane_s32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_s64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_s64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x2x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int64x2x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x2x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x3_t, %struct.int64x2x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x3_t, %struct.int64x2x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_s64(a, b, 1); | vst3q_lane_s64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_f16(half* %a, [3 x <8 x half>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_f16(half* noundef %a, [3 x <8 x half>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float16x8x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x3_t, %struct.float16x8x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_f16(a, b, 7); | vst3q_lane_f16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_f32(float* %a, [3 x <4 x float>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_f32(float* noundef %a, [3 x <4 x float>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float32x4x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x3_t, %struct.float32x4x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_f32(a, b, 3); | vst3q_lane_f32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_f64(double* %a, [3 x <2 x double>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_f64(double* noundef %a, [3 x <2 x double>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float64x2x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x2x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x3_t, %struct.float64x2x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x3_t, %struct.float64x2x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_f64(a, b, 1); | vst3q_lane_f64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_p8(i8* %a, [3 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_p8(i8* noundef %a, [3 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly8x16x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x3_t, %struct.poly8x16x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_p8(a, b, 15); | vst3q_lane_p8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_p16(i16* %a, [3 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_p16(i16* noundef %a, [3 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly16x8x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x3_t, %struct.poly16x8x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_p16(a, b, 7); | vst3q_lane_p16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3q_lane_p64(i64* %a, [3 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3q_lane_p64(i64* noundef %a, [3 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x2x3_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly64x2x3_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x3_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, %struct.poly64x2x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x3_t, %struct.poly64x2x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3q_lane_p64(a, b, 1); | vst3q_lane_p64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_u8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_u8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint8x8x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, %struct.uint8x8x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x3_t, %struct.uint8x8x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_u8(a, b, 7); | vst3_lane_u8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_u16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_u16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x4x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint16x4x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x3_t, %struct.uint16x4x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x3_t, %struct.uint16x4x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_u16(a, b, 3); | vst3_lane_u16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_u32(i32* %a, [3 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_u32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x2x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint32x2x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x3_t, %struct.uint32x2x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x3_t, %struct.uint32x2x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_u32(a, b, 1); | vst3_lane_u32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_u64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_u64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x1x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint64x1x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x3_t, %struct.uint64x1x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x3_t, %struct.uint64x1x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_u64(a, b, 0); | vst3_lane_u64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_s8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_s8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x8x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int8x8x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x8x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x3_t, %struct.int8x8x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x3_t, %struct.int8x8x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_s8(a, b, 7); | vst3_lane_s8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_s16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_s16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x4x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int16x4x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x4x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x3_t, %struct.int16x4x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x3_t, %struct.int16x4x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_s16(a, b, 3); | vst3_lane_s16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_s32(i32* %a, [3 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_s32(i32* noundef %a, [3 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x2x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int32x2x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x2x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x3_t, %struct.int32x2x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x3_t, %struct.int32x2x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_s32(a, b, 1); | vst3_lane_s32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_s64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_s64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x1x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int64x1x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x1x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x3_t, %struct.int64x1x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x3_t, %struct.int64x1x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_s64(a, b, 0); | vst3_lane_s64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_f16(half* %a, [3 x <4 x half>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_f16(half* noundef %a, [3 x <4 x half>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x4x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float16x4x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x4x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x3_t, %struct.float16x4x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x3_t, %struct.float16x4x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_f16(a, b, 3); | vst3_lane_f16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_f32(float* %a, [3 x <2 x float>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_f32(float* noundef %a, [3 x <2 x float>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x2x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float32x2x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x2x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x3_t, %struct.float32x2x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x3_t, %struct.float32x2x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_f32(a, b, 1); | vst3_lane_f32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_f64(double* %a, [3 x <1 x double>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_f64(double* noundef %a, [3 x <1 x double>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float64x1x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x1x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x3_t, %struct.float64x1x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x3_t, %struct.float64x1x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_f64(a, b, 0); | vst3_lane_f64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_p8(i8* %a, [3 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_p8(i8* noundef %a, [3 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly8x8x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, %struct.poly8x8x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x3_t, %struct.poly8x8x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_p8(a, b, 7); | vst3_lane_p8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_p16(i16* %a, [3 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_p16(i16* noundef %a, [3 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x4x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly16x4x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x3_t, %struct.poly16x4x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x3_t, %struct.poly16x4x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_p16(a, b, 3); | vst3_lane_p16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst3_lane_p64(i64* %a, [3 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst3_lane_p64(i64* noundef %a, [3 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x1x3_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly64x1x3_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x3_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, %struct.poly64x1x3_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x3_t, %struct.poly64x1x3_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst3_lane_p64(a, b, 0); | vst3_lane_p64(a, b, 0); | ||||
} | } | ||||
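The vst3_lane_* tests above all follow the same lowering pattern: the three source vectors arrive as a [3 x <N x T>] coerce parameter, get spilled into a struct alloca ([[B]]/[[__S1]]), and the lane index is the trailing constant argument. As a minimal, standalone sketch of what such a call looks like at the C source level (illustrative only, not part of this test file; the function name store_lane3_s16 is invented), assuming only <arm_neon.h>:

#include <arm_neon.h>

// Illustrative helper, not from the test: stores lane 3 of each of three
// int16x4_t vectors to dst[0], dst[1] and dst[2] as one interleaved store.
void store_lane3_s16(int16_t *dst, int16x4_t v0, int16x4_t v1, int16x4_t v2) {
  int16x4x3_t tuple = { { v0, v1, v2 } };
  // The lane argument must be an integer constant in [0, 3] for a 4 x i16
  // vector, which is why the test above passes the literal 3.
  vst3_lane_s16(dst, tuple, 3);
}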
// CHECK-LABEL: define void @test_vst4q_lane_u8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_u8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint8x16x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x16x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x16x4_t, %struct.uint8x16x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_u8(a, b, 15); | vst4q_lane_u8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_u16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_u16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint16x8x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x8x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x8x4_t, %struct.uint16x8x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_u16(a, b, 7); | vst4q_lane_u16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_u32(i32* %a, [4 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_u32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint32x4x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x4x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x4x4_t, %struct.uint32x4x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_u32(a, b, 3); | vst4q_lane_u32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_u64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_u64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x2x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.uint64x2x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x2x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x4_t, %struct.uint64x2x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x2x4_t, %struct.uint64x2x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_u64(a, b, 1); | vst4q_lane_u64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_s8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_s8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int8x16x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x16x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x16x4_t, %struct.int8x16x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_s8(a, b, 15); | vst4q_lane_s8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_s16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_s16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int16x8x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x8x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x8x4_t, %struct.int16x8x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_s16(a, b, 7); | vst4q_lane_s16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_s32(i32* %a, [4 x <4 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_s32(i32* noundef %a, [4 x <4 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int32x4x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x4x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x4x4_t, %struct.int32x4x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_s32(a, b, 3); | vst4q_lane_s32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_s64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_s64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x2x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.int64x2x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x2x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x2x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x4_t, %struct.int64x2x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x2x4_t, %struct.int64x2x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_s64(a, b, 1); | vst4q_lane_s64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_f16(half* %a, [4 x <8 x half>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_f16(half* noundef %a, [4 x <8 x half>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float16x8x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x8x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x8x4_t, %struct.float16x8x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_f16(a, b, 7); | vst4q_lane_f16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_f32(float* %a, [4 x <4 x float>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_f32(float* noundef %a, [4 x <4 x float>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float32x4x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x4x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x4x4_t, %struct.float32x4x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_f32(a, b, 3); | vst4q_lane_f32(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_f64(double* %a, [4 x <2 x double>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_f64(double* noundef %a, [4 x <2 x double>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.float64x2x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x2x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x2x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x4_t, %struct.float64x2x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x2x4_t, %struct.float64x2x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_f64(a, b, 1); | vst4q_lane_f64(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_p8(i8* %a, [4 x <16 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_p8(i8* noundef %a, [4 x <16 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly8x16x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x16x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x16x4_t, %struct.poly8x16x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_p8(a, b, 15); | vst4q_lane_p8(a, b, 15); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_p16(i16* %a, [4 x <8 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_p16(i16* noundef %a, [4 x <8 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly16x8x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x8x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x8x4_t, %struct.poly16x8x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_p16(a, b, 7); | vst4q_lane_p16(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4q_lane_p64(i64* %a, [4 x <2 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4q_lane_p64(i64* noundef %a, [4 x <2 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x2x4_t, align 16 | // CHECK: [[B:%.*]] = alloca %struct.poly64x2x4_t, align 16 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x2x4_t, align 16 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, %struct.poly64x2x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x2x4_t, %struct.poly64x2x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4q_lane_p64(a, b, 1); | vst4q_lane_p64(a, b, 1); | ||||
} | } | ||||
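The vst4q_lane_* tests differ from the vst3 group only in tuple size and width: four Q-register vectors, a 16-byte-aligned alloca instead of 8, and a wider lane range. A self-contained sketch of a corresponding call (illustrative only; store_lane7_u16 is an invented name, not part of the test):

#include <arm_neon.h>

// Illustrative helper, not from the test: writes lane 7 of each of four
// uint16x8_t vectors to dst[0..3] as a single interleaved lane store.
void store_lane7_u16(uint16_t *dst, uint16x8_t v0, uint16x8_t v1,
                     uint16x8_t v2, uint16x8_t v3) {
  uint16x8x4_t tuple = { { v0, v1, v2, v3 } };
  // Lane range for an 8 x i16 Q register is [0, 7]; the test uses 7.
  vst4q_lane_u16(dst, tuple, 7);
}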
// CHECK-LABEL: define void @test_vst4_lane_u8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_u8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint8x8x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint8x8x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, %struct.uint8x8x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint8x8x4_t, %struct.uint8x8x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_u8(a, b, 7); | vst4_lane_u8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_u16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_u16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint16x4x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint16x4x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint16x4x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x4_t, %struct.uint16x4x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint16x4x4_t, %struct.uint16x4x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_u16(a, b, 3); | vst4_lane_u16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_u32(i32* %a, [4 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_u32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint32x2x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint32x2x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint32x2x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x4_t, %struct.uint32x2x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint32x2x4_t, %struct.uint32x2x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_u32(a, b, 1); | vst4_lane_u32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_u64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_u64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.uint64x1x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.uint64x1x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.uint64x1x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x4_t, %struct.uint64x1x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.uint64x1x4_t, %struct.uint64x1x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_u64(a, b, 0); | vst4_lane_u64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_s8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_s8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int8x8x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int8x8x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int8x8x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x4_t, %struct.int8x8x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int8x8x4_t, %struct.int8x8x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_s8(a, b, 7); | vst4_lane_s8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_s16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_s16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int16x4x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int16x4x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int16x4x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int16x4x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x4_t, %struct.int16x4x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int16x4x4_t, %struct.int16x4x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_s16(a, b, 3); | vst4_lane_s16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_s32(i32* %a, [4 x <2 x i32>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_s32(i32* noundef %a, [4 x <2 x i32>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int32x2x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int32x2x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int32x2x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int32x2x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x4_t, %struct.int32x2x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int32x2x4_t, %struct.int32x2x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_s32(a, b, 1); | vst4_lane_s32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_s64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_s64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.int64x1x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.int64x1x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.int64x1x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.int64x1x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x4_t, %struct.int64x1x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.int64x1x4_t, %struct.int64x1x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_s64(a, b, 0); | vst4_lane_s64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_f16(half* %a, [4 x <4 x half>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_f16(half* noundef %a, [4 x <4 x half>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float16x4x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float16x4x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float16x4x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float16x4x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x4_t, %struct.float16x4x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float16x4x4_t, %struct.float16x4x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_f16(a, b, 3); | vst4_lane_f16(a, b, 3); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_f32(float* %a, [4 x <2 x float>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_f32(float* noundef %a, [4 x <2 x float>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float32x2x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float32x2x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float32x2x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float32x2x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x4_t, %struct.float32x2x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float32x2x4_t, %struct.float32x2x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_f32(a, b, 1); | vst4_lane_f32(a, b, 1); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_f64(double* %a, [4 x <1 x double>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_f64(double* noundef %a, [4 x <1 x double>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.float64x1x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.float64x1x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.float64x1x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x4_t, %struct.float64x1x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.float64x1x4_t, %struct.float64x1x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_f64(a, b, 0); | vst4_lane_f64(a, b, 0); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_p8(i8* %a, [4 x <8 x i8>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_p8(i8* noundef %a, [4 x <8 x i8>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly8x8x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly8x8x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, %struct.poly8x8x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly8x8x4_t, %struct.poly8x8x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_p8(a, b, 7); | vst4_lane_p8(a, b, 7); | ||||
} | } | ||||
// CHECK-LABEL: define void @test_vst4_lane_p16(i16* %a, [4 x <4 x i16>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_p16(i16* noundef %a, [4 x <4 x i16>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly16x4x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly16x4x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly16x4x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x4_t, %struct.poly16x4x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly16x4x4_t, %struct.poly16x4x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. | |||||
vst4_lane_p16(a, b, 3); | vst4_lane_p16(a, b, 3); | ||||
} | } | ||||
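For the 64-bit-element D-register variants in this vst4_lane_* group (u64/s64/f64, and p64 below), each tuple member is a single-element vector, so lane 0 is the only valid index, matching the constant 0 used in those tests. A small sketch, assuming an AArch64 <arm_neon.h> (store_lane0_f64 is an invented name, not part of the test):

#include <arm_neon.h>

// Illustrative helper, not from the test: each float64x1_t holds one lane,
// so 0 is the only legal lane index for vst4_lane_f64.
void store_lane0_f64(float64_t *dst, float64x1_t v0, float64x1_t v1,
                     float64x1_t v2, float64x1_t v3) {
  float64x1x4_t tuple = { { v0, v1, v2, v3 } };
  vst4_lane_f64(dst, tuple, 0);
}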
// CHECK-LABEL: define void @test_vst4_lane_p64(i64* %a, [4 x <1 x i64>] %b.coerce) #2 { | // CHECK-LABEL: define void @test_vst4_lane_p64(i64* noundef %a, [4 x <1 x i64>] %b.coerce) #2 { | ||||
// CHECK: [[B:%.*]] = alloca %struct.poly64x1x4_t, align 8 | // CHECK: [[B:%.*]] = alloca %struct.poly64x1x4_t, align 8 | ||||
// CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 | // CHECK: [[__S1:%.*]] = alloca %struct.poly64x1x4_t, align 8 | ||||
// CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, %struct.poly64x1x4_t* [[B]], i32 0, i32 0 | // CHECK: [[COERCE_DIVE:%.*]] = getelementptr inbounds %struct.poly64x1x4_t, %struct.poly64x1x4_t* [[B]], i32 0, i32 0 | ||||
Context not available. |