diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -482,11 +482,25 @@
     let pnt = PNT_None in {
       def : Intrinsic<IVector, (args FVector:$a), (ftoi $a, IVector)>,
             NameOverride<"vcvtq_" # IScalar>;
+
+      foreach suffix = ["a","n","p","m"] in
+        def : Intrinsic<IVector, (args FVector:$a),
+                        (IRInt<"vcvt"#suffix, [IVector, FVector]>
+                             (unsignedflag IScalar), $a)>,
+              NameOverride<"vcvt"#suffix#"q_" # IScalar>;
     }
 
     defm vcvtq: IntrinsicMX<IVector, (args FVector:$a, Predicate:$pred),
                             (IRInt<"vcvt_fp_int_predicated", [IVector, FVector, Predicate]>
                                  $a, (unsignedflag IScalar), $pred, $inactive),
                             1, "_" # IScalar, PNT_2Type, PNT_None>;
+
+    foreach suffix = ["a","n","p","m"] in {
+      defm "vcvt"#suffix#"q" : IntrinsicMX<
+          IVector, (args FVector:$a, Predicate:$pred),
+          (IRInt<"vcvt"#suffix#"_predicated", [IVector, FVector, Predicate]>
+               (unsignedflag IScalar), $inactive, $a, $pred),
+          1, "_" # IScalar, PNT_2Type, PNT_None>;
+    }
   }
 }
diff --git a/clang/test/CodeGen/arm-mve-intrinsics/vcvt_anpm.c b/clang/test/CodeGen/arm-mve-intrinsics/vcvt_anpm.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/arm-mve-intrinsics/vcvt_anpm.c
@@ -0,0 +1,614 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+// RUN: %clang_cc1 -DPOLYMORPHIC -triple thumbv8.1m.main-arm-none-eabi -target-feature +mve.fp -mfloat-abi hard -fallow-half-arguments-and-returns -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
+
+#include <arm_mve.h>
+
+// CHECK-LABEL: @test_vcvtaq_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vcvtaq_s16_f16(float16x8_t a)
+{
+    return vcvtaq_s16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtaq_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vcvtaq_s32_f32(float32x4_t a)
+{
+    return vcvtaq_s32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtaq_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vcvtaq_u16_f16(float16x8_t a)
+{
+    return vcvtaq_u16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtaq_u32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vcvtaq_u32_f32(float32x4_t a)
+{
+    return vcvtaq_u32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtmq_s16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vcvtmq_s16_f16(float16x8_t a)
+{
+    return vcvtmq_s16_f16(a);
+}
+
+// CHECK-LABEL: @test_vcvtmq_s32_f32(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vcvtmq_s32_f32(float32x4_t a)
+{
+    return vcvtmq_s32_f32(a);
+}
+
+// CHECK-LABEL: @test_vcvtmq_u16_f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]])
+// CHECK-NEXT:    ret <8 x i16> 
[[TMP0]] +// +uint16x8_t test_vcvtmq_u16_f16(float16x8_t a) +{ + return vcvtmq_u16_f16(a); +} + +// CHECK-LABEL: @test_vcvtmq_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]]) +// CHECK-NEXT: ret <4 x i32> [[TMP0]] +// +uint32x4_t test_vcvtmq_u32_f32(float32x4_t a) +{ + return vcvtmq_u32_f32(a); +} + +// CHECK-LABEL: @test_vcvtnq_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]]) +// CHECK-NEXT: ret <8 x i16> [[TMP0]] +// +int16x8_t test_vcvtnq_s16_f16(float16x8_t a) +{ + return vcvtnq_s16_f16(a); +} + +// CHECK-LABEL: @test_vcvtnq_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]]) +// CHECK-NEXT: ret <4 x i32> [[TMP0]] +// +int32x4_t test_vcvtnq_s32_f32(float32x4_t a) +{ + return vcvtnq_s32_f32(a); +} + +// CHECK-LABEL: @test_vcvtnq_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]]) +// CHECK-NEXT: ret <8 x i16> [[TMP0]] +// +uint16x8_t test_vcvtnq_u16_f16(float16x8_t a) +{ + return vcvtnq_u16_f16(a); +} + +// CHECK-LABEL: @test_vcvtnq_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]]) +// CHECK-NEXT: ret <4 x i32> [[TMP0]] +// +uint32x4_t test_vcvtnq_u32_f32(float32x4_t a) +{ + return vcvtnq_u32_f32(a); +} + +// CHECK-LABEL: @test_vcvtpq_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 0, <8 x half> [[A:%.*]]) +// CHECK-NEXT: ret <8 x i16> [[TMP0]] +// +int16x8_t test_vcvtpq_s16_f16(float16x8_t a) +{ + return vcvtpq_s16_f16(a); +} + +// CHECK-LABEL: @test_vcvtpq_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 0, <4 x float> [[A:%.*]]) +// CHECK-NEXT: ret <4 x i32> [[TMP0]] +// +int32x4_t test_vcvtpq_s32_f32(float32x4_t a) +{ + return vcvtpq_s32_f32(a); +} + +// CHECK-LABEL: @test_vcvtpq_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 1, <8 x half> [[A:%.*]]) +// CHECK-NEXT: ret <8 x i16> [[TMP0]] +// +uint16x8_t test_vcvtpq_u16_f16(float16x8_t a) +{ + return vcvtpq_u16_f16(a); +} + +// CHECK-LABEL: @test_vcvtpq_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 1, <4 x float> [[A:%.*]]) +// CHECK-NEXT: ret <4 x i32> [[TMP0]] +// +uint32x4_t test_vcvtpq_u32_f32(float32x4_t a) +{ + return vcvtpq_u32_f32(a); +} + +// CHECK-LABEL: @test_vcvtaq_m_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +int16x8_t test_vcvtaq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtaq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtaq_m_s16_f16(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtaq_m_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: 
[[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +int32x4_t test_vcvtaq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtaq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtaq_m_s32_f32(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtaq_m_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +uint16x8_t test_vcvtaq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtaq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtaq_m_u16_f16(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtaq_m_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +uint32x4_t test_vcvtaq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtaq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtaq_m_u32_f32(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtmq_m_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +int16x8_t test_vcvtmq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtmq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtmq_m_s16_f16(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtmq_m_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +int32x4_t test_vcvtmq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtmq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtmq_m_s32_f32(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtmq_m_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> 
[[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +uint16x8_t test_vcvtmq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtmq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtmq_m_u16_f16(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtmq_m_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +uint32x4_t test_vcvtmq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtmq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtmq_m_u32_f32(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtnq_m_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +int16x8_t test_vcvtnq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtnq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtnq_m_s16_f16(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtnq_m_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +int32x4_t test_vcvtnq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtnq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtnq_m_s32_f32(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtnq_m_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +uint16x8_t test_vcvtnq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtnq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtnq_m_u16_f16(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtnq_m_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +uint32x4_t test_vcvtnq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtnq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return 
vcvtnq_m_u32_f32(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtpq_m_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +int16x8_t test_vcvtpq_m_s16_f16(int16x8_t inactive, float16x8_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtpq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtpq_m_s16_f16(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtpq_m_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +int32x4_t test_vcvtpq_m_s32_f32(int32x4_t inactive, float32x4_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtpq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtpq_m_s32_f32(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtpq_m_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +uint16x8_t test_vcvtpq_m_u16_f16(uint16x8_t inactive, float16x8_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtpq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtpq_m_u16_f16(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtpq_m_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +uint32x4_t test_vcvtpq_m_u32_f32(uint32x4_t inactive, float32x4_t a, mve_pred16_t p) +{ +#ifdef POLYMORPHIC + return vcvtpq_m(inactive, a, p); +#else /* POLYMORPHIC */ + return vcvtpq_m_u32_f32(inactive, a, p); +#endif /* POLYMORPHIC */ +} + +// CHECK-LABEL: @test_vcvtaq_x_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +int16x8_t test_vcvtaq_x_s16_f16(float16x8_t a, mve_pred16_t p) +{ + return vcvtaq_x_s16_f16(a, p); +} + +// CHECK-LABEL: @test_vcvtaq_x_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> 
undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +int32x4_t test_vcvtaq_x_s32_f32(float32x4_t a, mve_pred16_t p) +{ + return vcvtaq_x_s32_f32(a, p); +} + +// CHECK-LABEL: @test_vcvtaq_x_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +uint16x8_t test_vcvtaq_x_u16_f16(float16x8_t a, mve_pred16_t p) +{ + return vcvtaq_x_u16_f16(a, p); +} + +// CHECK-LABEL: @test_vcvtaq_x_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +uint32x4_t test_vcvtaq_x_u32_f32(float32x4_t a, mve_pred16_t p) +{ + return vcvtaq_x_u32_f32(a, p); +} + +// CHECK-LABEL: @test_vcvtmq_x_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +int16x8_t test_vcvtmq_x_s16_f16(float16x8_t a, mve_pred16_t p) +{ + return vcvtmq_x_s16_f16(a, p); +} + +// CHECK-LABEL: @test_vcvtmq_x_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +int32x4_t test_vcvtmq_x_s32_f32(float32x4_t a, mve_pred16_t p) +{ + return vcvtmq_x_s32_f32(a, p); +} + +// CHECK-LABEL: @test_vcvtmq_x_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +uint16x8_t test_vcvtmq_x_u16_f16(float16x8_t a, mve_pred16_t p) +{ + return vcvtmq_x_u16_f16(a, p); +} + +// CHECK-LABEL: @test_vcvtmq_x_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +uint32x4_t test_vcvtmq_x_u32_f32(float32x4_t a, mve_pred16_t p) +{ + return vcvtmq_x_u32_f32(a, p); +} + +// CHECK-LABEL: @test_vcvtnq_x_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> 
@llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +int16x8_t test_vcvtnq_x_s16_f16(float16x8_t a, mve_pred16_t p) +{ + return vcvtnq_x_s16_f16(a, p); +} + +// CHECK-LABEL: @test_vcvtnq_x_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +int32x4_t test_vcvtnq_x_s32_f32(float32x4_t a, mve_pred16_t p) +{ + return vcvtnq_x_s32_f32(a, p); +} + +// CHECK-LABEL: @test_vcvtnq_x_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +uint16x8_t test_vcvtnq_x_u16_f16(float16x8_t a, mve_pred16_t p) +{ + return vcvtnq_x_u16_f16(a, p); +} + +// CHECK-LABEL: @test_vcvtnq_x_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +uint32x4_t test_vcvtnq_x_u32_f32(float32x4_t a, mve_pred16_t p) +{ + return vcvtnq_x_u32_f32(a, p); +} + +// CHECK-LABEL: @test_vcvtpq_x_s16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +int16x8_t test_vcvtpq_x_s16_f16(float16x8_t a, mve_pred16_t p) +{ + return vcvtpq_x_s16_f16(a, p); +} + +// CHECK-LABEL: @test_vcvtpq_x_s32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]]) +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +int32x4_t test_vcvtpq_x_s32_f32(float32x4_t a, mve_pred16_t p) +{ + return vcvtpq_x_s32_f32(a, p); +} + +// CHECK-LABEL: @test_vcvtpq_x_u16_f16( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]]) +// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> [[A:%.*]], <8 x i1> [[TMP1]]) +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +uint16x8_t test_vcvtpq_x_u16_f16(float16x8_t a, mve_pred16_t p) +{ + return vcvtpq_x_u16_f16(a, p); +} + +// CHECK-LABEL: @test_vcvtpq_x_u32_f32( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32 +// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 
[[TMP0]])
+// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> [[A:%.*]], <4 x i1> [[TMP1]])
+// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
+//
+uint32x4_t test_vcvtpq_x_u32_f32(float32x4_t a, mve_pred16_t p)
+{
+    return vcvtpq_x_u32_f32(a, p);
+}
+
diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -1166,6 +1166,12 @@
     llvm_anyvector_ty /* predicate */, LLVMMatchType<0> /* inactive */],
    [IntrNoMem]>;
 
+foreach suffix = ["a","n","p","m"] in {
+  defm "int_arm_mve_vcvt"#suffix: MVEMXPredicated<
+    [llvm_anyvector_ty /* output */], [llvm_i32_ty /* unsigned */],
+    [llvm_anyvector_ty /* input */], LLVMMatchType<0>, llvm_anyvector_ty>;
+}
+
 def int_arm_mve_vrintn: Intrinsic<
   [llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
 def int_arm_mve_vcls: Intrinsic<
diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -3659,21 +3659,42 @@
   let validForTailPredication = 1;
 }
 
-multiclass MVE_VCVT_fp_int_anpm_multi<string suffix, bits<2> size, bit op,
-                                      list<dag> pattern=[]> {
-  def a : MVE_VCVT_fp_int_anpm<suffix, size, op, "a", 0b00>;
-  def n : MVE_VCVT_fp_int_anpm<suffix, size, op, "n", 0b01>;
-  def p : MVE_VCVT_fp_int_anpm<suffix, size, op, "p", 0b10>;
-  def m : MVE_VCVT_fp_int_anpm<suffix, size, op, "m", 0b11>;
+multiclass MVE_VCVT_fp_int_anpm_inner<MVEVectorVTInfo Int, MVEVectorVTInfo Flt,
+                                      string anpm, bits<2> rm> {
+  def "": MVE_VCVT_fp_int_anpm<Int.Suffix # "." # Flt.Suffix, Int.Size,
+                               Int.Unsigned, anpm, rm>;
+
+  defvar Inst = !cast<Instruction>(NAME);
+  defvar IntrBaseName = "int_arm_mve_vcvt" # anpm;
+  defvar UnpredIntr = !cast<Intrinsic>(IntrBaseName);
+  defvar PredIntr = !cast<Intrinsic>(IntrBaseName # "_predicated");
+
+  let Predicates = [HasMVEFloat] in {
+    def : Pat<(Int.Vec (UnpredIntr (i32 Int.Unsigned), (Flt.Vec MQPR:$in))),
+              (Int.Vec (Inst (Flt.Vec MQPR:$in)))>;
+
+    def : Pat<(Int.Vec (PredIntr (i32 Int.Unsigned), (Int.Vec MQPR:$inactive),
+                                 (Flt.Vec MQPR:$in), (Flt.Pred VCCR:$pred))),
+              (Int.Vec (Inst (Flt.Vec MQPR:$in), ARMVCCThen,
+                             (Flt.Pred VCCR:$pred), (Int.Vec MQPR:$inactive)))>;
+  }
+}
+
+multiclass MVE_VCVT_fp_int_anpm_outer<MVEVectorVTInfo Int,
+                                      MVEVectorVTInfo Flt> {
+  defm a : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "a", 0b00>;
+  defm n : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "n", 0b01>;
+  defm p : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "p", 0b10>;
+  defm m : MVE_VCVT_fp_int_anpm_inner<Int, Flt, "m", 0b11>;
 }
 
 // This defines instructions such as MVE_VCVTu16f16a, with an explicit
 // rounding-mode suffix on the mnemonic. The class below will define
 // the bare MVE_VCVTu16f16 (with implied rounding toward zero).
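+//
+// Each _outer instantiation below expands, via _inner, to four instructions
+// (e.g. MVE_VCVTs16f16a, ...n, ...p, ...m). For each one, _inner also
+// attaches a pair of selection patterns: one matching the unpredicated
+// int_arm_mve_vcvt{a,n,p,m} intrinsic, and one matching its _predicated
+// form, which supplies the inactive-lane vector and marks the instruction
+// as conditionally executed under the predicate (ARMVCCThen).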
-defm MVE_VCVTs16f16 : MVE_VCVT_fp_int_anpm_multi<"s16.f16", 0b01, 0b0>;
-defm MVE_VCVTu16f16 : MVE_VCVT_fp_int_anpm_multi<"u16.f16", 0b01, 0b1>;
-defm MVE_VCVTs32f32 : MVE_VCVT_fp_int_anpm_multi<"s32.f32", 0b10, 0b0>;
-defm MVE_VCVTu32f32 : MVE_VCVT_fp_int_anpm_multi<"u32.f32", 0b10, 0b1>;
+defm MVE_VCVTs16f16 : MVE_VCVT_fp_int_anpm_outer<MVE_v8s16, MVE_v8f16>;
+defm MVE_VCVTu16f16 : MVE_VCVT_fp_int_anpm_outer<MVE_v8u16, MVE_v8f16>;
+defm MVE_VCVTs32f32 : MVE_VCVT_fp_int_anpm_outer<MVE_v4s32, MVE_v4f32>;
+defm MVE_VCVTu32f32 : MVE_VCVT_fp_int_anpm_outer<MVE_v4u32, MVE_v4f32>;
 
 class MVE_VCVT_fp_int<string suffix, bits<2> size, bit toint,
                       bit unsigned, list<dag> pattern=[]>
diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt_anpm.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt_anpm.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt_anpm.ll
@@ -0,0 +1,631 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtaq_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvta.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 0, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_s32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtaq_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvta.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 0, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtaq_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvta.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32 1, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_u32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtaq_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvta.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32 1, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtmq_s16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtm.s16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 0, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_s32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtmq_s32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtm.s32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 0, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_u16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtmq_u16_f16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtm.u16.f16 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32 1, <8 x half> %a)
+  ret <8 x i16> %0
+}
+
+define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_u32_f32(<4 x float> %a) {
+; CHECK-LABEL: test_vcvtmq_u32_f32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vcvtm.u32.f32 q0, q0
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = tail call <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32 1, <4 x float> %a)
+  ret <4 x i32> %0
+}
+
+define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_s16_f16(<8 x half> %a) {
+; CHECK-LABEL: test_vcvtnq_s16_f16:
+; 
CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvtn.s16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 0, <8 x half> %a) + ret <8 x i16> %0 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_s32_f32(<4 x float> %a) { +; CHECK-LABEL: test_vcvtnq_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvtn.s32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 0, <4 x float> %a) + ret <4 x i32> %0 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_u16_f16(<8 x half> %a) { +; CHECK-LABEL: test_vcvtnq_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvtn.u16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32 1, <8 x half> %a) + ret <8 x i16> %0 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_u32_f32(<4 x float> %a) { +; CHECK-LABEL: test_vcvtnq_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvtn.u32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32 1, <4 x float> %a) + ret <4 x i32> %0 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_s16_f16(<8 x half> %a) { +; CHECK-LABEL: test_vcvtpq_s16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvtp.s16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 0, <8 x half> %a) + ret <8 x i16> %0 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_s32_f32(<4 x float> %a) { +; CHECK-LABEL: test_vcvtpq_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvtp.s32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 0, <4 x float> %a) + ret <4 x i32> %0 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_u16_f16(<8 x half> %a) { +; CHECK-LABEL: test_vcvtpq_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvtp.u16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32 1, <8 x half> %a) + ret <8 x i16> %0 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_u32_f32(<4 x float> %a) { +; CHECK-LABEL: test_vcvtpq_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvtp.u32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32 1, <4 x float> %a) + ret <4 x i32> %0 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_m_s16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtaq_m_s16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtat.s16.f16 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_m_s32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtaq_m_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtat.s32.f32 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_m_u16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: 
test_vcvtaq_m_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtat.u16.f16 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_m_u32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtaq_m_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtat.u32.f32 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_m_s16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtmq_m_s16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtmt.s16.f16 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_m_s32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtmq_m_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtmt.s32.f32 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_m_u16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtmq_m_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtmt.u16.f16 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_m_u32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtmq_m_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtmt.u32.f32 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_m_s16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtnq_m_s16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtnt.s16.f16 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> 
%inactive, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_m_s32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtnq_m_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtnt.s32.f32 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_m_u16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtnq_m_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtnt.u16.f16 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_m_u32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtnq_m_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtnt.u32.f32 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_m_s16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtpq_m_s16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtpt.s16.f16 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_m_s32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtpq_m_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtpt.s32.f32 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_m_u16_f16(<8 x i16> %inactive, <8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtpq_m_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtpt.u16.f16 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> %inactive, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_m_u32_f32(<4 x i32> %inactive, <4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtpq_m_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtpt.u32.f32 q0, q1 +; CHECK-NEXT: bx 
lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> %inactive, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_x_s16_f16(<8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtaq_x_s16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtat.s16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_x_s32_f32(<4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtaq_x_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtat.s32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtaq_x_u16_f16(<8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtaq_x_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtat.u16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtaq_x_u32_f32(<4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtaq_x_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtat.u32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_x_s16_f16(<8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtmq_x_s16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtmt.s16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_x_s32_f32(<4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtmq_x_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtmt.s32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtmq_x_u16_f16(<8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtmq_x_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtmt.u16.f16 q0, 
q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtmq_x_u32_f32(<4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtmq_x_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtmt.u32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_x_s16_f16(<8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtnq_x_s16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtnt.s16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_x_s32_f32(<4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtnq_x_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtnt.s32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtnq_x_u16_f16(<8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtnq_x_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtnt.u16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtnq_x_u32_f32(<4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtnq_x_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtnt.u32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_x_s16_f16(<8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtpq_x_s16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtpt.s16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 0, <8 x i16> undef, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_x_s32_f32(<4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtpq_x_s32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: 
vcvtpt.s32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 0, <4 x i32> undef, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +define arm_aapcs_vfpcc <8 x i16> @test_vcvtpq_x_u16_f16(<8 x half> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtpq_x_u16_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtpt.u16.f16 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %0) + %2 = tail call <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32 1, <8 x i16> undef, <8 x half> %a, <8 x i1> %1) + ret <8 x i16> %2 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vcvtpq_x_u32_f32(<4 x float> %a, i16 zeroext %p) { +; CHECK-LABEL: test_vcvtpq_x_u32_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvtpt.u32.f32 q0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32 1, <4 x i32> undef, <4 x float> %a, <4 x i1> %1) + ret <4 x i32> %2 +} + +declare <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32) +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) + +declare <8 x i16> @llvm.arm.mve.vcvta.v8i16.v8f16(i32, <8 x half>) +declare <4 x i32> @llvm.arm.mve.vcvta.v4i32.v4f32(i32, <4 x float>) +declare <8 x i16> @llvm.arm.mve.vcvtm.v8i16.v8f16(i32, <8 x half>) +declare <4 x i32> @llvm.arm.mve.vcvtm.v4i32.v4f32(i32, <4 x float>) +declare <8 x i16> @llvm.arm.mve.vcvtn.v8i16.v8f16(i32, <8 x half>) +declare <4 x i32> @llvm.arm.mve.vcvtn.v4i32.v4f32(i32, <4 x float>) +declare <8 x i16> @llvm.arm.mve.vcvtp.v8i16.v8f16(i32, <8 x half>) +declare <4 x i32> @llvm.arm.mve.vcvtp.v4i32.v4f32(i32, <4 x float>) + +declare <8 x i16> @llvm.arm.mve.vcvta.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, <8 x i1>) +declare <4 x i32> @llvm.arm.mve.vcvta.predicated.v4i32.v4f32.v4i1(i32, <4 x i32>, <4 x float>, <4 x i1>) +declare <8 x i16> @llvm.arm.mve.vcvtm.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, <8 x i1>) +declare <4 x i32> @llvm.arm.mve.vcvtm.predicated.v4i32.v4f32.v4i1(i32, <4 x i32>, <4 x float>, <4 x i1>) +declare <8 x i16> @llvm.arm.mve.vcvtn.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, <8 x i1>) +declare <4 x i32> @llvm.arm.mve.vcvtn.predicated.v4i32.v4f32.v4i1(i32, <4 x i32>, <4 x float>, <4 x i1>) +declare <8 x i16> @llvm.arm.mve.vcvtp.predicated.v8i16.v8f16.v8i1(i32, <8 x i16>, <8 x half>, <8 x i1>) +declare <4 x i32> @llvm.arm.mve.vcvtp.predicated.v4i32.v4f32.v4i1(i32, <4 x i32>, <4 x float>, <4 x i1>)
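+
+; Note the operand order of the predicated conversion intrinsics declared
+; above: (unsigned flag, inactive vector, input vector, predicate). This
+; differs from the pre-existing int_arm_mve_vcvt_fp_int_predicated intrinsic
+; used by the plain round-to-zero vcvtq, which takes
+; (input, unsigned flag, predicate, inactive).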